From 38a8791aa7bfaa497e12e56b904a6b45986162d8 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 17 Aug 2017 16:37:49 -0400 Subject: [PATCH 001/232] drm/amdgpu: Fix huge page updates with CPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Correctly detect system memory mappings when using CPU and don't use huge pages for them. Avoid incorrectly translating a physical page table GPU address when splitting a huge page while mapping system memory. Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b1343e5541d3..ba475af993326 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1277,7 +1277,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type < CHIP_VEGA10 || nptes != AMDGPU_VM_PTE_COUNT(p->adev) || - p->func == amdgpu_vm_do_copy_ptes || + p->src || !(flags & AMDGPU_PTE_VALID)) { dst = amdgpu_bo_gpu_offset(entry->bo); @@ -1294,9 +1294,23 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, entry->addr = (dst | flags); if (use_cpu_update) { + /* In case a huge page is replaced with a system + * memory mapping, p->pages_addr != NULL and + * amdgpu_vm_cpu_set_ptes would try to translate dst + * through amdgpu_vm_map_gart. But dst is already a + * GPU address (of the page table). Disable + * amdgpu_vm_map_gart temporarily. + */ + dma_addr_t *tmp; + + tmp = p->pages_addr; + p->pages_addr = NULL; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + + p->pages_addr = tmp; } else { if (parent->bo->shadow) { pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); From ddbb5313502f89fb41dbec209427f1a21f628441 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 21 Aug 2017 09:51:10 +0800 Subject: [PATCH 002/232] drm/amdgpu/virtual_dce: Virtual display doesn't support disable vblank immediately MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For virtual display, it uses software timer to emulate the vsync interrupt, it doesn't have high precision, so doesn't support disable vblank immediately. 
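The emulated vsync here is driven by a periodic software timer rather than a display-controller interrupt, so there is no precise hardware timestamp for the DRM vblank code to rely on when re-enabling the interrupt, which is what immediate vblank disabling assumes. A rough sketch of such an emulated vblank source, assuming an hrtimer at a nominal 60 Hz; the timer field and handler name below are illustrative, not the driver's actual symbols:

  /* Illustrative only: emulate vblank with a ~16.7 ms hrtimer. */
  #define VIRTUAL_VBLANK_PERIOD_NS 16666666ULL

  static enum hrtimer_restart virtual_vblank_timer_fn(struct hrtimer *t)
  {
          struct amdgpu_device *adev =
                  container_of(t, struct amdgpu_device, virtual_vblank_timer);

          drm_handle_vblank(adev->ddev, 0);       /* report vblank on pipe 0 */
          hrtimer_forward_now(t, ns_to_ktime(VIRTUAL_VBLANK_PERIOD_NS));
          return HRTIMER_RESTART;
  }

Because the timer only approximates the real scan-out position, keeping vblank interrupts enabled a little longer (the default behaviour) is the safer choice, hence the guard added below.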
BUG: SWDEV-129274 Signed-off-by: Emily Deng Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 4bdd851f56d08..538e5f27d1205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -221,8 +221,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) spin_lock_init(&adev->irq.lock); - /* Disable vblank irqs aggressively for power-saving */ - adev->ddev->vblank_disable_immediate = true; + if (!adev->enable_virtual_display) + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { From 9dd73b1e89d7eb3f5c0a00aa264e473364ba1aa6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 17 Aug 2017 15:25:04 +0800 Subject: [PATCH 003/232] drm/amd/powerplay: unhalt mec after loading Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 76347ff6d6554..c49a6f22002f7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->num_register_entries = 0; } - if (fw_type == UCODE_ID_RLC_G) + if ((fw_type == UCODE_ID_RLC_G) + || (fw_type == UCODE_ID_CP_MEC)) entry->flags = 1; else entry->flags = 0; From 2d6fb10565ca13e7a0f6f4725f1c3da5a99a2bcc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 18 Aug 2017 23:39:52 -0400 Subject: [PATCH 004/232] drm/amdgpu/gfx8: fix spelling typo in mqd allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 ++++++------ drivers/gpu/drm/amd/include/vi_structs.h | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 832e592fcd072..fc260c13b1da4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4579,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_misc_reserved = 0x00000003; if (!(adev->flags & AMD_IS_APU)) { mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); } eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; @@ -4768,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 
mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); @@ -4792,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index ca93b5160ba6c..3e606a761d0e6 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -419,8 +419,8 @@ struct vi_mqd_allocation { struct vi_mqd mqd; uint32_t wptr_poll_mem; uint32_t rptr_report_mem; - uint32_t dyamic_cu_mask; - uint32_t dyamic_rb_mask; + uint32_t dynamic_cu_mask; + uint32_t dynamic_rb_mask; }; struct cz_mqd { From 83e74db6a81daff277732bdd00b438ede2107c68 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 21 Aug 2017 11:58:25 -0400 Subject: [PATCH 005/232] drm/amdgpu: add automatic per asic settings for gart_size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need a larger gart for asics that do not support GPUVM on all engines (e.g., MM) to make sure we have enough space for all gtt buffers in physical mode. Change the default size based on the asic type. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 16 +++++++++++++++- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12e71bbfd2228..103635ab784c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -76,7 +76,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; -extern unsigned amdgpu_gart_size; +extern int amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1a459ac63df45..f7ffb029f6d58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1062,11 +1062,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size < 32) { + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { /* gart size must be greater or equal to 32M */ dev_warn(adev->dev, "gart size (%d) too small\n", amdgpu_gart_size); - amdgpu_gart_size = 32; + amdgpu_gart_size = -1; } if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e39ec981b11c8..4ecf73787475b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -76,7 +76,7 @@ int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; -unsigned amdgpu_gart_size = 256; +int amdgpu_gart_size = -1; /* auto */ int 
amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; @@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +MODULE_PARM_DESC(gartsize, "Size of gart to setup in megabytes (32, 64, etc., -1=auto)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 94c1e2e8e34ca..b9b9f680fc084 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -65,7 +65,21 @@ */ void amdgpu_gart_set_defaults(struct amdgpu_device *adev) { - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; + u64 gart_size; + + if (amdgpu_gart_size == -1) { + /* make the GART larger for chips that + * dont' support VM for all rings + */ + if (adev->asic_type <= CHIP_STONEY) + gart_size = 1024; + else + gart_size = 256; + } else { + gart_size = amdgpu_gart_size; + } + + adev->mc.gart_size = gart_size << 20; } /** From cf273a59ca3068caced2adaf2deeb44d2013c8a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 18 Aug 2017 15:50:17 +0200 Subject: [PATCH 006/232] drm/amdgpu: fix and cleanup shadow handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set the shadow flag on the shadow and not the parent, always bind shadow BOs during allocation instead of manually, use the reservation_object wrappers to grab the lock. This fixes a couple of issues with binding the shadow BOs as well as correctly evicting them when memory becomes tight. 
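Part of the fix is that the shadow copy, which lives in GTT, now has its placement capped at the end of the GART aperture so it can actually be bound when it is needed (for example to restore contents after a reset, as in the device.c hunk below). The cap (lpfn) is counted in pages while mc.gart_size is in bytes, hence the >> PAGE_SHIFT in the object.c hunk. A standalone check of that conversion, assuming 4 KiB pages and the 256 MB default GART:

  #include <stdio.h>

  int main(void)
  {
          unsigned long long gart_size = 256ULL << 20;  /* 256 MB in bytes */
          unsigned int page_shift = 12;                 /* 4 KiB pages */

          printf("lpfn = %llu pages\n", gart_size >> page_shift);  /* 65536 */
          return 0;
  }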
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 46 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ---- 3 files changed, 23 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f7ffb029f6d58..e630d918fefc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, goto err; } - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); - if (r) { - DRM_ERROR("%p bind failed\n", bo->shadow); - goto err; - } - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e7e899190befb..9e495da0bb03c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; - places[c].lpfn = 0; + if (flags & AMDGPU_GEM_CREATE_SHADOW) + places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; + else + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_TT; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | @@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, if (bo->shadow) return 0; - bo->flags |= AMDGPU_GEM_CREATE_SHADOW; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); - - amdgpu_ttm_placement_init(adev, &placement, - placements, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW); r = amdgpu_bo_create_restricted(adev, size, byte_align, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, NULL, &placement, bo->tbo.resv, 0, @@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; int r; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + domain, parent_flags); - amdgpu_ttm_placement_init(adev, &placement, - placements, domain, flags); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, init_value, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, + parent_flags, sg, &placement, resv, + init_value, bo_ptr); if (r) return r; - if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { - if (!resv) { - r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); - WARN_ON(r != 0); - } + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { + if (!resv) + WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, + NULL)); r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); if (!resv) - ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); + 
reservation_object_unlock((*bo_ptr)->tbo.resv); if (r) amdgpu_bo_unref(bo_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ba475af993326..96ec4e2b56e9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, unsigned i; int r; - if (parent->bo->shadow) { - struct amdgpu_bo *shadow = parent->bo->shadow; - - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); - if (r) - return r; - } - if (use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, NULL); if (r) From f0694d3b8a7069a69d9eff9609ed1f8daa17886f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 21 Aug 2017 14:27:51 +0200 Subject: [PATCH 007/232] drm/amdgpu: discard commands of killed processes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a process is killed we shouldn't submit all waiting jobs, but instead clean up as fast as possible. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8b..97c94f9683fa0 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { struct amd_sched_rq *rq = entity->rq; + int r; if (!amd_sched_entity_is_initialized(sched, entity)) return; - /** * The client will not queue more IBs during this fini, consume existing - * queued IBs + * queued IBs or discard them on SIGKILL */ - wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); - + if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) + r = -ERESTARTSYS; + else + r = wait_event_killable(sched->job_scheduled, + amd_sched_entity_is_idle(entity)); amd_sched_rq_remove_entity(rq, entity); + if (r) { + struct amd_sched_job *job; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + while (kfifo_out(&entity->job_queue, &job, sizeof(job))) + sched->ops->free_job(job); + + } kfifo_free(&entity->job_queue); } From 84d43463a2d09c28c9222fbb7d1082c078e2523a Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 22 Aug 2017 11:19:10 +0800 Subject: [PATCH 008/232] drm/amd/powerplay: ACG frequency added in PPTable Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11 ++++++++--- drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 6 ++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 9d71a259d97d4..f8f02e70b8bc0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1558,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) */ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, - uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, + uint32_t *acg_freq) { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); @@ -1609,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, cpu_to_le16(dividers.usPll_ss_slew_frac); current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); + *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */ + return 0; } @@ -1689,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) for (i = 0; i < dpm_table->count; i++) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[i].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; } @@ -1698,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) while (i < NUM_GFXCLK_DPM_LEVELS) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[j].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; i++; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index f6d6c61f796a4..2818c98ff5ca9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -315,10 +315,12 @@ typedef struct { uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; GbVdroopTable_t AcgBtcGbVdroopTable; QuadraticInt_t AcgAvfsGb; - uint32_t Reserved[4]; + + /* ACG Frequency Table, in Mhz */ + uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS]; /* Padding - ignore */ - uint32_t MmHubPadding[7]; /* SMU internal use */ + uint32_t MmHubPadding[3]; /* SMU internal use */ } PPTable_t; From a4da14cc6266a8b77e897750390cd4afe0e52344 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 22 Aug 2017 12:21:07 -0400 Subject: [PATCH 009/232] drm/amdgpu: refine default gart size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Be more explicit and add comments explaining each case. Also s/gart/GART/ in the parameter string as per Felix' suggestion. 
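The resulting policy is: honour an explicit gartsize= request, otherwise fall back to a per-ASIC default, with 1 GB for parts whose UVD/VCE (or display) blocks cannot use GPUVM and therefore need GART-backed physical addressing. Condensed into a sketch, with a hypothetical needs_large_gart() predicate standing in for the asic_type switch below:

  /* sketch only; needs_large_gart() is a stand-in for the switch statement */
  static unsigned long long pick_gart_size_mb(int requested_mb,
                                              int needs_large_gart)
  {
          if (requested_mb != -1)
                  return (unsigned long long)requested_mb; /* explicit override */
          return needs_large_gart ? 1024 : 256;            /* per-ASIC default */
  }

  /* usage: adev->mc.gart_size = pick_gart_size_mb(amdgpu_gart_size, ...) << 20; */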
Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 38 ++++++++++++++++++++---- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4ecf73787475b..0f16986ec5bc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of gart to setup in megabytes (32, 64, etc., -1=auto)"); +MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index b9b9f680fc084..2027eb010a47b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -68,13 +68,39 @@ void amdgpu_gart_set_defaults(struct amdgpu_device *adev) u64 gart_size; if (amdgpu_gart_size == -1) { - /* make the GART larger for chips that - * dont' support VM for all rings - */ - if (adev->asic_type <= CHIP_STONEY) - gart_size = 1024; - else + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: /* no MM engines */ +#endif + case CHIP_TOPAZ: /* no MM engines */ + case CHIP_POLARIS11: /* all engines support GPUVM */ + case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS12: /* all engines support GPUVM */ + case CHIP_VEGA10: /* all engines support GPUVM */ + default: gart_size = 256; + break; +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ + case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ +#endif + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ + case CHIP_RAVEN: /* DCE SG support */ + gart_size = 1024; + break; + } } else { gart_size = amdgpu_gart_size; } From c3db7b5a5591ede54fad5a4f5ea45f298e5d3470 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 22 Aug 2017 13:06:30 -0400 Subject: [PATCH 010/232] drm/amdgpu: move default gart size setting into gmc modules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the asic specific code into the IP modules. 
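Every per-ASIC path below converts the module parameter with (u64)amdgpu_gart_size << 20 rather than shifting the int directly; the cast matters because amdgpu_gart_size holds megabytes in a 32-bit int, and a request of 2 GB or more would overflow if shifted before being widened. A standalone illustration (using unsigned arithmetic so the narrow case stays well defined, and assuming 32-bit unsigned int):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          unsigned int mb = 4096;                 /* a 4 GB gartsize= request */

          uint64_t narrow = (uint64_t)(mb << 20); /* 32-bit shift wraps to 0 */
          uint64_t wide   = (uint64_t)mb << 20;   /* widen first: 4294967296 */

          printf("%llu vs %llu bytes\n",
                 (unsigned long long)narrow, (unsigned long long)wide);
          return 0;
  }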
Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 52 ------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1 - drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 19 ++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 22 +++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 21 +++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16 +++++++- 6 files changed, 74 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 2027eb010a47b..f4370081f6e60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -56,58 +56,6 @@ * Common GART table functions. */ -/** - * amdgpu_gart_set_defaults - set the default gart_size - * - * @adev: amdgpu_device pointer - * - * Set the default gart_size based on parameters and available VRAM. - */ -void amdgpu_gart_set_defaults(struct amdgpu_device *adev) -{ - u64 gart_size; - - if (amdgpu_gart_size == -1) { - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_HAINAN: /* no MM engines */ -#endif - case CHIP_TOPAZ: /* no MM engines */ - case CHIP_POLARIS11: /* all engines support GPUVM */ - case CHIP_POLARIS10: /* all engines support GPUVM */ - case CHIP_POLARIS12: /* all engines support GPUVM */ - case CHIP_VEGA10: /* all engines support GPUVM */ - default: - gart_size = 256; - break; -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ - case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ - case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ - case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ - case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ - case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ - case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ - case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ -#endif - case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ - case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ - case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ - case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ - case CHIP_RAVEN: /* DCE SG support */ - gart_size = 1024; - break; - } - } else { - gart_size = amdgpu_gart_size; - } - - adev->mc.gart_size = gart_size << 20; -} - /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index d4cce69362003..afbe803b1a13a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -56,7 +56,6 @@ struct amdgpu_gart { const struct amdgpu_gart_funcs *gart_funcs; }; -void amdgpu_gart_set_defaults(struct amdgpu_device *adev); int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 12b0c4cd7a5af..5be9c83dfcf7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -332,7 +332,24 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - amdgpu_gart_set_defaults(adev); + /* set the 
gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_HAINAN: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index e42c1ad3af5e0..eace9e7182c8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -386,7 +386,27 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_TOPAZ: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ + case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; +#endif + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 7ca2dae8237a0..3b3326daf32b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -562,7 +562,26 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_POLARIS11: /* all engines support GPUVM */ + case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS12: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2769c2b3b56e8..d04d0b1232120 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -499,7 +499,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_VEGA10: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_RAVEN: /* DCE SG support */ + 
adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; From b249e18df151c9627af808321a8090c0b8d4d834 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 22 Aug 2017 16:39:30 -0400 Subject: [PATCH 011/232] drm/amdgpu: set sched_hw_submission higher for KIQ (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KIQ doesn't really use the GPU scheduler. The base drivers generally use the KIQ ring directly rather than submitting IBs. However, amdgpu_sched_hw_submission (which defaults to 2) limits the number of outstanding fences to 2. KFD uses the KIQ for TLB flushes and the 2 fence limit hurts performance when there are several KFD processes running. v2: move some expressions to one line change KIQ sched_hw_submission to at least 16 v3: bump to 256 Reviewed-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6c5646b48d1a5..5ce65280b3960 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned irq_type) { int r; + int sched_hw_submission = amdgpu_sched_hw_submission; + + /* Set the hw submission limit higher for KIQ because + * it's used for a number of gfx/compute tasks by both + * KFD and KGD which may have outstanding fences and + * it doesn't really use the gpu scheduler anyway; + * KIQ tasks get submitted directly to the ring. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + sched_hw_submission = max(sched_hw_submission, 256); if (ring->adev == NULL) { if (adev->num_rings >= AMDGPU_MAX_RINGS) @@ -178,8 +188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, - amdgpu_sched_hw_submission); + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); if (r) return r; } @@ -218,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_size = roundup_pow_of_two(max_dw * 4 * - amdgpu_sched_hw_submission); + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; ring->ptr_mask = ring->funcs->support_64bit_ptrs ? From 7e96a13523af12645b7e18d7cc268a95b72ff026 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 11 Aug 2017 13:50:51 +0800 Subject: [PATCH 012/232] drm/ttm: fix missing inc bo_count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d0459b392e5eb..c934ad5b39036 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -469,6 +469,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. 
*/ + atomic_inc(&bo->glob->bo_count); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); From 172423bcc7654a9cc71ff208b8f797e5e11a08af Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 10 Aug 2017 20:38:41 +0800 Subject: [PATCH 013/232] drm/ttm:fix wrong decoding of bo_count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit we observe abnormal number from: /sys/devices/virtual/drm/amdttm/buffer_objects/bo_count bo_count is atomic_inc which is "int" type, shouldn't explicitly turn it to unsigned long. Signed-off-by: Monk Liu Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cba11f13d994f..180ce62964161 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,8 +109,8 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj, struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); - return snprintf(buffer, PAGE_SIZE, "%lu\n", - (unsigned long) atomic_read(&glob->bo_count)); + return snprintf(buffer, PAGE_SIZE, "%d\n", + atomic_read(&glob->bo_count)); } static struct attribute *ttm_bo_global_attrs[] = { From febb84a60304e7257d42b90cc191ef84279e5152 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 22 Aug 2017 12:50:46 +0200 Subject: [PATCH 014/232] drm/amdgpu: remove the GART copy hack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This isn't used since we don't map evicted BOs to GART any more. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 96ec4e2b56e9b..3bd430e180b51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1616,7 +1616,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to - * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update @@ -1630,7 +1629,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint64_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, @@ -1685,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } if (pages_addr) { - if (flags == gtt_flags) - src = adev->gart.table_addr + - (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; - else - max_entries = min(max_entries, 16ull * 1024ull); + max_entries = min(max_entries, 16ull * 1024ull); addr = 0; } else if (flags & AMDGPU_PTE_VALID) { addr += adev->vm_manager.vram_base_offset; @@ -1734,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - uint64_t gtt_flags, flags; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive; + uint64_t flags; int r; if (clear || !bo_va->base.bo) { @@ 
-1757,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo) { + if (bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? - flags : 0; - } else { + else flags = 0x0; - gtt_flags = ~0x0; - } spin_lock(&vm->status_lock); if (!list_empty(&bo_va->base.vm_status)) @@ -1773,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, - gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, &bo_va->last_pt_update); if (r) From 9b0655e3adb6f741b79ce8758f77bf9e08e58243 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 22 Aug 2017 16:58:07 +0200 Subject: [PATCH 015/232] drm/amdgpu: fix amdgpu_ttm_bind MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use ttm_bo_mem_space instead of manually allocating GART space. This allows us to evict BOs when there isn't enought GART space any more. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 14 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 31 +++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 --- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 9e05e257729f2..0d15eb7d31d7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -108,10 +108,10 @@ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) * * Allocate the address space for a node. 
*/ -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) +static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; @@ -143,12 +143,8 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, fpfn, lpfn, mode); spin_unlock(&mgr->lock); - if (!r) { + if (!r) mem->start = node->start; - if (&tbo->mem == mem) - tbo->offset = (tbo->mem.start << PAGE_SHIFT) + - tbo->bdev->man[tbo->mem.mem_type].gpu_offset; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8b2c294f6f799..1efe1cba7e114 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -824,20 +824,39 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg tmp; + + struct ttm_placement placement; + struct ttm_place placements; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) return 0; - r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, - NULL, bo_mem); - if (r) { - DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); + tmp = bo->mem; + tmp.mm_node = NULL; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + + r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); + if (unlikely(r)) return r; - } - return amdgpu_ttm_do_bind(ttm, bo_mem); + r = ttm_bo_move_ttm(bo, true, false, &tmp); + if (unlikely(r)) + ttm_bo_mem_put(bo, &tmp); + else + bo->offset = (bo->mem.start << PAGE_SHIFT) + + bo->bdev->man[bo->mem.mem_type].gpu_offset; + + return r; } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f22a4758719da..43093bffa2cfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -62,10 +62,6 @@ extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); From 1cacc86a63abda5ff94a405765ca06b1c3a4031d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 22 Aug 2017 21:04:47 +0200 Subject: [PATCH 016/232] drm/amdgpu: inline amdgpu_ttm_do_bind again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function is called only once and doesn't do anything special. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 45 ++++++++++--------------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1efe1cba7e114..7ef6c28a34d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -761,35 +761,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - uint64_t flags; - int r; - - spin_lock(>t->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); - gtt->offset = (u64)mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(>t->list, >t->adev->gtt_list); -error_gart_bind: - spin_unlock(>t->adev->gtt_list_lock); - return r; - -} - static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; + uint64_t flags; int r = 0; if (gtt->userptr) { @@ -809,9 +785,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (amdgpu_gtt_mgr_is_allocated(bo_mem)) - r = amdgpu_ttm_do_bind(ttm, bo_mem); + if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) + return 0; + + spin_lock(>t->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + list_add_tail(>t->list, >t->adev->gtt_list); +error_gart_bind: + spin_unlock(>t->adev->gtt_list_lock); return r; } From a3ce364558faa12c4f25466dfc89eb3146b8063c Mon Sep 17 00:00:00 2001 From: Roger He Date: Thu, 24 Aug 2017 14:57:57 +0800 Subject: [PATCH 017/232] drm/amd/amdgpu: fix BANK_SELECT on Vega10 (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BANK_SELECT should always be FRAGMENT_SIZE + 3 due to 8-entry (2^3) per cache line in L2 TLB for Vega10. 
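With L2_CACHE_BIGK_FRAGMENT_SIZE fixed at 6 in these init functions, that rule yields the hard-coded value used below; a quick check of the arithmetic:

  #include <stdio.h>

  int main(void)
  {
          unsigned int fragment_size = 6; /* L2_CACHE_BIGK_FRAGMENT_SIZE below */
          unsigned int entries_log2  = 3; /* 8 (= 2^3) entries per L2 TLB cache line */

          printf("BANK_SELECT = %u\n", fragment_size + entries_log2); /* 9 */
          return 0;
  }

The hunks below accordingly stop deriving BANK_SELECT from adev->vm_manager.fragment_size, which is not guaranteed to be 9.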
v2: agd: fix warning Reviewed-by: Christian König Signed-off-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 4f2788b61a08b..6c8040e616c4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,9 +143,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 4395a4f12149c..74cb647da30e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,9 +157,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); From 06f10a537ec1d5fe68dc889a9a5d11afa49e6e0d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 23 Aug 2017 07:52:36 +0200 Subject: [PATCH 018/232] drm/amdgpu: check memory allocation failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check memory allocation failure and return -ENOMEM in such a case. 'num_post_dep_syncobjs' still has to be set to 0 before the test in order to have it initialized if 'amdgpu_cs_parser_fini()' is called to free resources. The calling graph would be, in such a case! 
failure in amdgpu_cs_process_syncobj_out_dep() ---> error code returned by amdgpu_cs_dependencies() --> amdgpu_cs_parser_fini() is called Reviewed-by: Christian König Signed-off-by: Christophe JAILLET Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 269b835571eb0..d6ddd5562c16b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1079,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, GFP_KERNEL); p->num_post_dep_syncobjs = 0; + if (!p->post_dep_syncobjs) + return -ENOMEM; + for (i = 0; i < num_deps; ++i) { p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); if (!p->post_dep_syncobjs[i]) From fd4b5f54e1afae4717ed716c8fdd72cb531f808b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 23 Aug 2017 14:17:40 +0100 Subject: [PATCH 019/232] drm/amdgpu: remove duplicate return statement Remove a redundant identical return statement, it has no use. Detected by CoverityScan, CID#1454586 ("Structurally dead code") Reviewed-by: Felix Kuehling Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index fb6e5dbd5a035..309f2419c6d8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -155,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; - return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) From 727030b0c6322b127b524dab1fbbe55a441a54b9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 18 Aug 2017 16:46:47 +0800 Subject: [PATCH 020/232] drm/amdgpu: support polaris10/11/12 new cp firmwares MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Newer versions of the CP firmware require changes in how the driver initializes the hw block. Change the firmware name for new firmware to maintain compatibility with older kernels. 
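The hunks below repeat one pattern for each CP block on Polaris: try the new *_2.bin firmware name first and, if it is absent, fall back to the legacy name so older firmware installs keep working. Factored into a sketch (the helper below is illustrative; the driver open-codes it per firmware type):

  /* illustrative helper, not part of the patch */
  static int request_cp_fw(const struct firmware **fw, struct device *dev,
                           const char *chip_name, const char *block)
  {
          char fw_name[40];
          int err;

          snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s_2.bin",
                   chip_name, block);
          err = request_firmware(fw, fw_name, dev);         /* prefer new image */
          if (err == -ENOENT) {
                  snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s.bin",
                           chip_name, block);
                  err = request_firmware(fw, fw_name, dev); /* legacy fallback */
          }
          return err;
  }

Keeping the old file name for the old firmware means a new kernel with an older linux-firmware install still works, at the cost of the duplicated request logic seen below.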
Acked-by: Christian König Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 65 ++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fc260c13b1da4..775484639f79a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -918,8 +918,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); - err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (err == -ENOENT) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + } + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + } if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.pfp_fw); @@ -929,8 +938,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); - err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (err == -ENOENT) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + } + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + } if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.me_fw); @@ -941,8 +959,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); - err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + if (err == -ENOENT) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + } + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + } if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.ce_fw); @@ -1012,8 +1039,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { + snprintf(fw_name, 
sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (err == -ENOENT) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + } + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + } if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.mec_fw); @@ -1025,8 +1061,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) if ((adev->asic_type != CHIP_STONEY) && (adev->asic_type != CHIP_TOPAZ)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (err == -ENOENT) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + } + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + } if (!err) { err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) From a92e145059cb883155a24a2d3ac33296d33d9df7 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 23 Aug 2017 15:17:47 -0400 Subject: [PATCH 021/232] drm/ttm: Add DMA map/unmap tracepoint (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also exports two functions that vendor drivers can call to trace DMA mappings. This is meant to help translate IOMMU mappings of bus addresses back to physical pages. Used by the umr amdgpu debugger for instance. Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher (v2): Use dev_name() to get PCI path instead. (v3): Use correct types for dma/phys addresses --- drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_debug.c | 74 +++++++++++++++++++++++ drivers/gpu/drm/ttm/ttm_trace.h | 87 +++++++++++++++++++++++++++ drivers/gpu/drm/ttm/ttm_tracepoints.c | 45 ++++++++++++++ include/drm/ttm/ttm_debug.h | 31 ++++++++++ 5 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_debug.c create mode 100644 drivers/gpu/drm/ttm/ttm_trace.h create mode 100644 drivers/gpu/drm/ttm/ttm_tracepoints.c create mode 100644 include/drm/ttm/ttm_debug.h diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 4d0c938ff4b21..a44fdfbe6351a 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -1,10 +1,11 @@ # # Makefile for the drm device driver. This driver provides support for the +ccflags-y := -I$(src)/. 
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ - ttm_bo_manager.o ttm_page_alloc_dma.o + ttm_bo_manager.o ttm_page_alloc_dma.o ttm_debug.o ttm_tracepoints.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_debug.c b/drivers/gpu/drm/ttm/ttm_debug.c new file mode 100644 index 0000000000000..ef5f0d0901545 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_debug.c @@ -0,0 +1,74 @@ +/************************************************************************** + * + * Copyright (c) 2017 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Tom St Denis + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ttm_trace.h" + +void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + + if (unlikely(trace_ttm_dma_map_enabled())) { + for (i = 0; i < tt->ttm.num_pages; i++) { + trace_ttm_dma_map( + dev, + tt->ttm.pages[i], + tt->dma_address[i]); + } + } +} +EXPORT_SYMBOL(ttm_trace_dma_map); + +void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + + if (unlikely(trace_ttm_dma_unmap_enabled())) { + for (i = 0; i < tt->ttm.num_pages; i++) { + trace_ttm_dma_unmap( + dev, + tt->ttm.pages[i], + tt->dma_address[i]); + } + } +} +EXPORT_SYMBOL(ttm_trace_dma_unmap); + diff --git a/drivers/gpu/drm/ttm/ttm_trace.h b/drivers/gpu/drm/ttm/ttm_trace.h new file mode 100644 index 0000000000000..23279b9b8e645 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_trace.h @@ -0,0 +1,87 @@ +/************************************************************************** + * + * Copyright (c) 2017 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Tom St Denis + */ +#if !defined(_TTM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TTM_TRACE_H_ + +#include +#include +#include + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ttm +#define TRACE_INCLUDE_FILE ttm_trace + +TRACE_EVENT(ttm_dma_map, + TP_PROTO(struct device *dev, struct page *page, dma_addr_t dma_address), + TP_ARGS(dev, page, dma_address), + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __field(dma_addr_t, dma) + __field(phys_addr_t, phys) + ), + TP_fast_assign( + __assign_str(device, dev_name(dev)); + __entry->dma = dma_address; + __entry->phys = page_to_phys(page); + ), + TP_printk("%s: %pad => %pa", + __get_str(device), + &__entry->dma, + &__entry->phys) +); + +TRACE_EVENT(ttm_dma_unmap, + TP_PROTO(struct device *dev, struct page *page, dma_addr_t dma_address), + TP_ARGS(dev, page, dma_address), + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __field(dma_addr_t, dma) + __field(phys_addr_t, phys) + ), + TP_fast_assign( + __assign_str(device, dev_name(dev)); + __entry->dma = dma_address; + __entry->phys = page_to_phys(page); + ), + TP_printk("%s: %pad => %pa", + __get_str(device), + &__entry->dma, + &__entry->phys) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include + diff --git a/drivers/gpu/drm/ttm/ttm_tracepoints.c b/drivers/gpu/drm/ttm/ttm_tracepoints.c new file mode 100644 index 0000000000000..861a6266822be --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_tracepoints.c @@ -0,0 +1,45 @@ +/************************************************************************** + * + * Copyright (c) 2017 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Tom St Denis + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "ttm_trace.h" diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h new file mode 100644 index 0000000000000..b5e460fa50864 --- /dev/null +++ b/include/drm/ttm/ttm_debug.h @@ -0,0 +1,31 @@ +/************************************************************************** + * + * Copyright (c) 2017 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ +/* + * Authors: Tom St Denis + */ +extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt); +extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt); From ca3670aa370e3cd01020decb308a0a5ddf193183 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 23 Aug 2017 15:33:40 -0400 Subject: [PATCH 022/232] drm/amd/amdgpu: Remove AMDGPU tracepoint and use new TTM tracepoint (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switches the AMDGPU driver over to the TTM tracepoint and removes our old one. Now you can enable traces before loading the module and trace all mappings. Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher (v2): Use struct device instead of pci in trace. --- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 56 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 21 ++------- 2 files changed, 3 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 1c88bd5e29adc..b1f97417241d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -14,62 +14,6 @@ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) -TRACE_EVENT(amdgpu_ttm_tt_populate, - TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), - TP_ARGS(adev, dma_address, phys_address), - TP_STRUCT__entry( - __field(uint16_t, domain) - __field(uint8_t, bus) - __field(uint8_t, slot) - __field(uint8_t, func) - __field(uint64_t, dma) - __field(uint64_t, phys) - ), - TP_fast_assign( - __entry->domain = pci_domain_nr(adev->pdev->bus); - __entry->bus = adev->pdev->bus->number; - __entry->slot = PCI_SLOT(adev->pdev->devfn); - __entry->func = PCI_FUNC(adev->pdev->devfn); - __entry->dma = dma_address; - __entry->phys = phys_address; - ), - TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", - (unsigned)__entry->domain, - (unsigned)__entry->bus, - (unsigned)__entry->slot, - (unsigned)__entry->func, - (unsigned long long)__entry->dma, - (unsigned long long)__entry->phys) -); - -TRACE_EVENT(amdgpu_ttm_tt_unpopulate, - TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), - TP_ARGS(adev, dma_address, phys_address), - TP_STRUCT__entry( - __field(uint16_t, domain) - __field(uint8_t, bus) - __field(uint8_t, slot) - __field(uint8_t, func) - __field(uint64_t, dma) - __field(uint64_t, phys) - ), - TP_fast_assign( - __entry->domain = pci_domain_nr(adev->pdev->bus); - __entry->bus = adev->pdev->bus->number; - __entry->slot = PCI_SLOT(adev->pdev->devfn); - __entry->func = PCI_FUNC(adev->pdev->devfn); - __entry->dma = dma_address; - __entry->phys = phys_address; - ), - TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", - (unsigned)__entry->domain, - (unsigned)__entry->bus, - (unsigned)__entry->slot, - (unsigned)__entry->func, - (unsigned long long)__entry->dma, - (unsigned long long)__entry->phys) -); - TRACE_EVENT(amdgpu_mm_rreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d99..d1d94a14b089a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,6 +34,7 @@ #include #include #include +#include #include 
#include #include @@ -667,32 +668,16 @@ static void amdgpu_trace_dma_map(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned i; - if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) { - for (i = 0; i < ttm->num_pages; i++) { - trace_amdgpu_ttm_tt_populate( - adev, - gtt->ttm.dma_address[i], - page_to_phys(ttm->pages[i])); - } - } + ttm_trace_dma_map(adev->dev, &gtt->ttm); } static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned i; - if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) { - for (i = 0; i < ttm->num_pages; i++) { - trace_amdgpu_ttm_tt_unpopulate( - adev, - gtt->ttm.dma_address[i], - page_to_phys(ttm->pages[i])); - } - } + ttm_trace_dma_unmap(adev->dev, &gtt->ttm); } /* prepare the sg table with the user pages */ From e719d5169f75ead3c05329b4125afb67b4f33eba Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 21 Aug 2017 15:43:32 -0400 Subject: [PATCH 023/232] drm/amd/include: Add hdmi_redriver_set to atomfirmware We'll need this for some upcoming display changes Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 837296db9628b..7c92f47070850 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1017,6 +1017,19 @@ struct atom_14nm_combphy_tmds_vs_set uint8_t margin_deemph_lane0__deemph_sel_val; }; +struct atom_i2c_reg_info { + uint8_t ucI2cRegIndex; + uint8_t ucI2cRegVal; +}; + +struct atom_hdmi_retimer_redriver_set { + uint8_t HdmiSlvAddr; + uint8_t HdmiRegNum; + uint8_t Hdmi6GRegNum; + struct atom_i2c_reg_info HdmiRegSetting[9]; //For non 6G Hz use + struct atom_i2c_reg_info Hdmi6GhzRegSetting[3]; //For 6G Hz use. +}; + struct atom_integrated_system_info_v1_11 { struct atom_common_table_header table_header; @@ -1052,7 +1065,11 @@ struct atom_integrated_system_info_v1_11 struct atom_14nm_dpphy_dp_tuningset dp_tuningset; struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; struct atom_camera_data camera_info; - uint32_t reserved[138]; + struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0 + struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1 + struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2 + struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3 + uint32_t reserved[108]; }; From a4dec819c8bba6365eb893a4ca88db4dd1210110 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 18 Aug 2017 10:04:57 -0400 Subject: [PATCH 024/232] drm/ttm: Add helper functions to populate/map in one call (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions replace a section of common code found in radeon/amdgpu drivers (and possibly others) as part of the ttm_tt_*populate() callbacks.
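For illustration, with these helpers a driver's populate/unpopulate callbacks reduce to roughly the sketch below. The "example_ttm_tt" wrapper and the way it stores the struct device are assumptions made for this note, not part of the patch; the real amdgpu and radeon conversions follow in the next two patches.

struct example_ttm_tt {
	struct ttm_dma_tt ttm;	/* hypothetical wrapper: embeds struct ttm_tt plus the dma_address array */
	struct device *dev;	/* device used for the DMA mappings */
};

static int example_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct example_ttm_tt *gtt =
		container_of(ttm, struct example_ttm_tt, ttm.ttm);

	/* allocate pages from the pool and DMA map them in one call */
	return ttm_populate_and_map_pages(gtt->dev, &gtt->ttm);
}

static void example_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct example_ttm_tt *gtt =
		container_of(ttm, struct example_ttm_tt, ttm.ttm);

	/* DMA unmap the pages and return them to the pool */
	ttm_unmap_and_unpopulate_pages(gtt->dev, &gtt->ttm);
}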
v2: squash in fix for sw iommu from Tom Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 41 ++++++++++++++++++++++++++++ include/drm/ttm/ttm_page_alloc.h | 21 ++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 8715998267730..6a660d196d877 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -920,6 +920,47 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_pool_unpopulate); +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + int r; + + r = ttm_pool_populate(&tt->ttm); + if (r) + return r; + + for (i = 0; i < tt->ttm.num_pages; i++) { + tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], + 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, tt->dma_address[i])) { + while (i--) { + dma_unmap_page(dev, tt->dma_address[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + tt->dma_address[i] = 0; + } + ttm_pool_unpopulate(&tt->ttm); + return -EFAULT; + } + } + return 0; +} +EXPORT_SYMBOL(ttm_populate_and_map_pages); + +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + unsigned i; + + for (i = 0; i < tt->ttm.num_pages; i++) { + if (tt->dma_address[i]) { + dma_unmap_page(dev, tt->dma_address[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + } + } + ttm_pool_unpopulate(&tt->ttm); +} +EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); + int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 49a828425fa2d..bf21166f2b970 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -83,6 +83,17 @@ extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); + +/** + * Populates and DMA maps pages to fullfil a ttm_dma_populate() request + */ +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); + +/** + * Unpopulates and DMA unmaps pages as part of a + * ttm_dma_unpopulate() request */ +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); + #else static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) @@ -105,6 +116,16 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) { } + +static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) +{ + return -ENOMEM; +} + +static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) +{ +} + #endif #endif From 7405e0dad4c75b33976ddd997513635d7a0204b1 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 18 Aug 2017 10:05:48 -0400 Subject: [PATCH 025/232] drm/amd/amdgpu: Use new TTM populate/map helper function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 33 ++----------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d1d94a14b089a..ce384ed9b9efb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -926,7 +926,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned i; int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -958,27 +957,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) } #endif - r = ttm_pool_populate(ttm); - if (r) { - return r; - } - - for (i = 0; i < ttm->num_pages; i++) { - gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i], - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { - while (i--) { - pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - gtt->ttm.dma_address[i] = 0; - } - ttm_pool_unpopulate(ttm); - return -EFAULT; - } - } - - r = 0; + r = ttm_populate_and_map_pages(adev->dev, &gtt->ttm); trace_mappings: if (likely(!r)) amdgpu_trace_dma_map(ttm); @@ -989,7 +968,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -1012,14 +990,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) } #endif - for (i = 0; i < ttm->num_pages; i++) { - if (gtt->ttm.dma_address[i]) { - pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - } - } - - ttm_pool_unpopulate(ttm); + ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm); } int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, From f7871fd19389c5f64f625a4389675d0740f0dfe4 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 18 Aug 2017 10:06:34 -0400 Subject: [PATCH 026/232] drm/radeon: use new TTM populate/dma map helper functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_ttm.c | 33 ++--------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index bf69bf9086bf2..8032da57e4090 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -725,8 +725,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); struct radeon_device *rdev; - unsigned i; - int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (ttm->state != tt_unpopulated) @@ -762,33 +760,13 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) } #endif - r = ttm_pool_populate(ttm); - if (r) { - return r; - } - - for (i = 0; i < ttm->num_pages; i++) { - gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { - while (i--) { - pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - gtt->ttm.dma_address[i] = 0; - } - ttm_pool_unpopulate(ttm); - return -EFAULT; - } - } - return 0; + return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm); } static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); - unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -815,14 +793,7 @@ static void radeon_ttm_tt_unpopulate(struct
ttm_tt *ttm) } #endif - for (i = 0; i < ttm->num_pages; i++) { - if (gtt->ttm.dma_address[i]) { - pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - } - } - - ttm_pool_unpopulate(ttm); + ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm); } int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, From 96bec198352799794b0f8937620e811ef8b9fa22 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 24 Aug 2017 06:46:39 -0400 Subject: [PATCH 027/232] drm/ttm: Remove needless 'extern' on functions in header. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Minor tidy up. Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/drm/ttm/ttm_page_alloc.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index bf21166f2b970..38a2b4770c35f 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void); * * Add backing pages to all of @ttm */ -extern int ttm_pool_populate(struct ttm_tt *ttm); +int ttm_pool_populate(struct ttm_tt *ttm); /** * ttm_pool_unpopulate: @@ -56,12 +56,12 @@ extern int ttm_pool_populate(struct ttm_tt *ttm); * * Free all pages of @ttm */ -extern void ttm_pool_unpopulate(struct ttm_tt *ttm); +void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Output the state of pools to debugfs file */ -extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); +int ttm_page_alloc_debugfs(struct seq_file *m, void *data); #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) @@ -78,10 +78,10 @@ void ttm_dma_page_alloc_fini(void); /** * Output the state of pools to debugfs file */ -extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); +int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); -extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); +void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); /** From db95e2185523ee9d46a13ceee37bffe8442d2e1c Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Tue, 22 Aug 2017 12:31:43 -0400 Subject: [PATCH 028/232] drm/amdgpu: Add debugfs file for VBIOS and version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add 2 debugfs files, one that contains the VBIOS version, and one that contains the VBIOS itself. These won't change after initialization, so we can add the VBIOS version when we parse the atombios information. This ensures that we can find out the VBIOS version, even when the dmesg buffer fills up, and makes it easier to associate which VBIOS version is for which GPU on mGPU configurations. Set the size to 20 characters in case of some weird VBIOS version that exceeds the expected 17 character format (3-8-3\0).
The VBIOS dump also allows for easy debugging v2: Move to debugfs, clarify commit message, add VBIOS dump file Signed-off-by: Kent Russell Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 62 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/atom.c | 5 +- drivers/gpu/drm/amd/amdgpu/atom.h | 1 + 3 files changed, 67 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e630d918fefc0..da0a667a911b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -65,6 +65,8 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); +static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { "TAHITI", @@ -2201,6 +2203,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("registering firmware debugfs failed (%d).\n", r); + r = amdgpu_debugfs_vbios_dump_init(adev); + if (r) + DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); + + r = amdgpu_debugfs_vbios_version_init(adev); + if (r) + DRM_ERROR("Creating vbios version debugfs failed (%d).\n", r); + if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -3754,6 +3764,50 @@ int amdgpu_debugfs_init(struct drm_minor *minor) { return 0; } + +static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_write(m, adev->bios, adev->bios_size); + return 0; +} + +static int amdgpu_debugfs_get_vbios_version(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct atom_context *ctx = adev->mode_info.atom_context; + + seq_printf(m, "%s\n", ctx->vbios_version); + return 0; +} + +static const struct drm_info_list amdgpu_vbios_dump_list[] = { + {"amdgpu_vbios", + amdgpu_debugfs_get_vbios_dump, + 0, NULL}, +}; + +static const struct drm_info_list amdgpu_vbios_version_list[] = { + {"amdgpu_vbios_version", + amdgpu_debugfs_get_vbios_version, + 0, NULL}, +}; + +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) +{ + return amdgpu_debugfs_add_files(adev, + amdgpu_vbios_dump_list, 1); +} +static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev) +{ + return amdgpu_debugfs_add_files(adev, + amdgpu_vbios_version_list, 1); +} #else static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) { @@ -3763,5 +3817,13 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } +static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) +{ + return 0; +} +static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev) +{ + return 0; +} static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index d69aa2e179bbe..69500a8b4e2df 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ 
b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) idx = 0x80; str = CSTR(idx); - if (*str != '\0') + if (*str != '\0') { pr_info("ATOM BIOS: %s\n", str); + strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); + } + return ctx; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index ddd8045accf3e..a39170991afe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -140,6 +140,7 @@ struct atom_context { int io_mode; uint32_t *scratch; int scratch_size_bytes; + char vbios_version[20]; }; extern int amdgpu_atom_debug; From 5b41d94cc409610aa74b320b57a436df05a23f0f Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Tue, 22 Aug 2017 12:31:43 -0400 Subject: [PATCH 029/232] drm/amdgpu: Move VBIOS version to sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sysfs is more stable, and doesn't require root to access Signed-off-by: Kent Russell Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 54 +++++++++------------- 1 file changed, 23 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index da0a667a911b0..1c5c44acaad2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -66,7 +66,6 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); -static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { "TAHITI", @@ -889,6 +888,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) return r; } +static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct atom_context *ctx = adev->mode_info.atom_context; + + return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); +} + +static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, + NULL); + /** * amdgpu_atombios_fini - free the driver info and callbacks for atombios * @@ -908,6 +921,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev) adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); adev->mode_info.atom_card_info = NULL; + device_remove_file(adev->dev, &dev_attr_vbios_version); } /** @@ -924,6 +938,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) { struct card_info *atom_card_info = kzalloc(sizeof(struct card_info), GFP_KERNEL); + int ret; if (!atom_card_info) return -ENOMEM; @@ -960,6 +975,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); } + + ret = device_create_file(adev->dev, &dev_attr_vbios_version); + if (ret) { + DRM_ERROR("Failed to create device file for VBIOS version\n"); + return ret; + } + return 0; } @@ -2207,10 +2229,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); - r = amdgpu_debugfs_vbios_version_init(adev); - if (r) - DRM_ERROR("Creating vbios 
version debugfs failed (%d).\n", r); - if ((amdgpu_testing & 1)) { if (adev->accel_working) amdgpu_test_moves(adev); @@ -3775,39 +3793,17 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) return 0; } -static int amdgpu_debugfs_get_vbios_version(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct atom_context *ctx = adev->mode_info.atom_context; - - seq_printf(m, "%s\n", ctx->vbios_version); - return 0; -} - static const struct drm_info_list amdgpu_vbios_dump_list[] = { {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump, 0, NULL}, }; -static const struct drm_info_list amdgpu_vbios_version_list[] = { - {"amdgpu_vbios_version", - amdgpu_debugfs_get_vbios_version, - 0, NULL}, -}; - static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) { return amdgpu_debugfs_add_files(adev, amdgpu_vbios_dump_list, 1); } -static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev) -{ - return amdgpu_debugfs_add_files(adev, - amdgpu_vbios_version_list, 1); -} #else static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) { @@ -3821,9 +3817,5 @@ static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) { return 0; } -static int amdgpu_debugfs_vbios_version_init(struct amdgpu_device *adev) -{ - return 0; -} static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif From 87f64a76b38acaa081b2bc46c3169884b9ccd6f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 23 Aug 2017 14:05:48 +0200 Subject: [PATCH 030/232] drm/amdgpu: fix amdgpu_vm_bo_map trace point MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That somehow got lost. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3bd430e180b51..db63ff5c80f20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2111,6 +2111,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); + trace_amdgpu_vm_bo_map(bo_va, mapping); return 0; } @@ -2176,6 +2177,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, if (flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); + trace_amdgpu_vm_bo_map(bo_va, mapping); return 0; } From 34d7be5dc28041b95254da517fd0f0f740544ece Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 24 Aug 2017 12:32:55 +0200 Subject: [PATCH 031/232] drm/amdgpu: fix and cleanup VM ready check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop checking the mapped BO itself, cause that one is certainly not a page table. 
Additional to that move the code into amdgpu_vm.c Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 33 ++----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e1..9b1b6bdd48410 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -127,35 +127,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, return 0; } -static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo) -{ - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - return -ERESTARTSYS; - - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - return -ERESTARTSYS; - - return 0; -} - -static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *list) -{ - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { - struct amdgpu_bo *bo = - container_of(entry->bo, struct amdgpu_bo, tbo); - if (amdgpu_gem_vm_check(NULL, bo)) - return false; - } - - return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL); -} - void amdgpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv) { @@ -189,7 +160,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, if (bo_va && --bo_va->ref_count == 0) { amdgpu_vm_bo_rmv(adev, bo_va); - if (amdgpu_gem_vm_ready(adev, vm, &list)) { + if (amdgpu_vm_ready(adev, vm)) { struct dma_fence *fence = NULL; r = amdgpu_vm_clear_freed(adev, vm, &fence); @@ -513,7 +484,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, { int r = -ERESTARTSYS; - if (!amdgpu_gem_vm_ready(adev, vm, list)) + if (!amdgpu_vm_ready(adev, vm)) goto error; r = amdgpu_vm_update_directories(adev, vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index db63ff5c80f20..67c37b22f8ef3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -231,6 +231,38 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, adev->mman.bdev.glob); } +/** + * amdgpu_vm_check - helper for amdgpu_vm_ready + */ +static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) +{ + /* if anything is swapped out don't swap it in here, + just abort and wait for the next CS */ + if (!amdgpu_bo_gpu_accessible(bo)) + return -ERESTARTSYS; + + if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) + return -ERESTARTSYS; + + return 0; +} + +/** + * amdgpu_vm_ready - check VM is ready for updates + * + * @adev: amdgpu device + * @vm: VM to check + * + * Check if all VM PDs/PTs are ready for updates + */ +bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + if (amdgpu_vm_check(NULL, vm->root.bo)) + return false; + + return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_vm_check, NULL); +} + /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ba6691b58ee72..9347d28c3c1e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -225,6 +225,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); +bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); From 6ac7defb5ccf2c5e7b3fc9eb648535f3b3d17bc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 23 Aug 2017 20:11:25 +0200 Subject: [PATCH 032/232] drm/amdgpu: cleanup GWS, GDS and OA allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Those are certainly not kernel allocations, instead set the NO_CPU_ACCESS flag. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9b1b6bdd48410..ba012933e6aaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -186,17 +186,17 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_gem_create *args = data; + uint64_t flags = args->in.domain_flags; uint64_t size = args->in.bo_size; struct drm_gem_object *gobj; uint32_t handle; - bool kernel = false; int r; /* reject invalid gem flags */ - if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS | - AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_VRAM_CLEARED)) + if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_VRAM_CLEARED)) return -EINVAL; /* reject invalid gem domains */ @@ -211,7 +211,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { - kernel = true; + flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) size = size << AMDGPU_GDS_SHIFT; else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS) @@ -225,8 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, r = amdgpu_gem_object_create(adev, size, args->in.alignment, (u32)(0xffffffff & args->in.domains), - args->in.domain_flags, - kernel, &gobj); + flags, false, &gobj); if (r) return r; From ecf9d3448540830d2cd2fadd0cf70f236d1c5c6a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 Aug 2017 16:04:50 -0400 Subject: [PATCH 033/232] drm/amdgpu/powerplay/vega10: fix typo in register base index Probably a copy pasta. No functional difference, both have the same value. 
Reviewed-by: Felix Kuehling Reported-by: Michael von Khurja Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d44243441d284..d8551ae79f538 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -374,7 +374,7 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) uint32_t reg; reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); + mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); temp = cgs_read_register(hwmgr->device, reg); From 925d5d798f465671c6b8011e80c636da46ef1a16 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 Aug 2017 16:46:29 -0400 Subject: [PATCH 034/232] drm/amdgpu/gfx8: apply dynamic cu mask to APUs as well Confirmed with the hw team. It's the same for all asics. Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 775484639f79a..6666fcd8b08fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4622,12 +4622,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_static_thread_mgmt_se2 = 0xffffffff; mqd->compute_static_thread_mgmt_se3 = 0xffffffff; mqd->compute_misc_reserved = 0x00000003; - if (!(adev->flags & AMD_IS_APU)) { - mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); - mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); - } + mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); + mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); From 31bf29ab398be98891cc139ca6170575b973fa0d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 Aug 2017 16:47:15 -0400 Subject: [PATCH 035/232] drm/amdgpu/gfx8: drop cz mqd It was unused and according to hw team, it's the same for all asics in a gfx family so remove it. 
Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/vi_structs.h | 259 ----------------------- 1 file changed, 259 deletions(-) diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index 3e606a761d0e6..20234820194bd 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -423,265 +423,6 @@ struct vi_mqd_allocation { uint32_t dynamic_rb_mask; }; -struct cz_mqd { - uint32_t header; - uint32_t compute_dispatch_initiator; - uint32_t compute_dim_x; - uint32_t compute_dim_y; - uint32_t compute_dim_z; - uint32_t compute_start_x; - uint32_t compute_start_y; - uint32_t compute_start_z; - uint32_t compute_num_thread_x; - uint32_t compute_num_thread_y; - uint32_t compute_num_thread_z; - uint32_t compute_pipelinestat_enable; - uint32_t compute_perfcount_enable; - uint32_t compute_pgm_lo; - uint32_t compute_pgm_hi; - uint32_t compute_tba_lo; - uint32_t compute_tba_hi; - uint32_t compute_tma_lo; - uint32_t compute_tma_hi; - uint32_t compute_pgm_rsrc1; - uint32_t compute_pgm_rsrc2; - uint32_t compute_vmid; - uint32_t compute_resource_limits; - uint32_t compute_static_thread_mgmt_se0; - uint32_t compute_static_thread_mgmt_se1; - uint32_t compute_tmpring_size; - uint32_t compute_static_thread_mgmt_se2; - uint32_t compute_static_thread_mgmt_se3; - uint32_t compute_restart_x; - uint32_t compute_restart_y; - uint32_t compute_restart_z; - uint32_t compute_thread_trace_enable; - uint32_t compute_misc_reserved; - uint32_t compute_dispatch_id; - uint32_t compute_threadgroup_id; - uint32_t compute_relaunch; - uint32_t compute_wave_restore_addr_lo; - uint32_t compute_wave_restore_addr_hi; - uint32_t compute_wave_restore_control; - uint32_t reserved_39; - uint32_t reserved_40; - uint32_t reserved_41; - uint32_t reserved_42; - uint32_t reserved_43; - uint32_t reserved_44; - uint32_t reserved_45; - uint32_t reserved_46; - uint32_t reserved_47; - uint32_t reserved_48; - uint32_t reserved_49; - uint32_t reserved_50; - uint32_t reserved_51; - uint32_t reserved_52; - uint32_t reserved_53; - uint32_t reserved_54; - uint32_t reserved_55; - uint32_t reserved_56; - uint32_t reserved_57; - uint32_t reserved_58; - uint32_t reserved_59; - uint32_t reserved_60; - uint32_t reserved_61; - uint32_t reserved_62; - uint32_t reserved_63; - uint32_t reserved_64; - uint32_t compute_user_data_0; - uint32_t compute_user_data_1; - uint32_t compute_user_data_2; - uint32_t compute_user_data_3; - uint32_t compute_user_data_4; - uint32_t compute_user_data_5; - uint32_t compute_user_data_6; - uint32_t compute_user_data_7; - uint32_t compute_user_data_8; - uint32_t compute_user_data_9; - uint32_t compute_user_data_10; - uint32_t compute_user_data_11; - uint32_t compute_user_data_12; - uint32_t compute_user_data_13; - uint32_t compute_user_data_14; - uint32_t compute_user_data_15; - uint32_t cp_compute_csinvoc_count_lo; - uint32_t cp_compute_csinvoc_count_hi; - uint32_t reserved_83; - uint32_t reserved_84; - uint32_t reserved_85; - uint32_t cp_mqd_query_time_lo; - uint32_t cp_mqd_query_time_hi; - uint32_t cp_mqd_connect_start_time_lo; - uint32_t cp_mqd_connect_start_time_hi; - uint32_t cp_mqd_connect_end_time_lo; - uint32_t cp_mqd_connect_end_time_hi; - uint32_t cp_mqd_connect_end_wf_count; - uint32_t cp_mqd_connect_end_pq_rptr; - uint32_t cp_mqd_connect_end_pq_wptr; - uint32_t cp_mqd_connect_end_ib_rptr; - uint32_t reserved_96; - uint32_t reserved_97; - uint32_t cp_mqd_save_start_time_lo; - uint32_t 
cp_mqd_save_start_time_hi; - uint32_t cp_mqd_save_end_time_lo; - uint32_t cp_mqd_save_end_time_hi; - uint32_t cp_mqd_restore_start_time_lo; - uint32_t cp_mqd_restore_start_time_hi; - uint32_t cp_mqd_restore_end_time_lo; - uint32_t cp_mqd_restore_end_time_hi; - uint32_t reserved_106; - uint32_t reserved_107; - uint32_t gds_cs_ctxsw_cnt0; - uint32_t gds_cs_ctxsw_cnt1; - uint32_t gds_cs_ctxsw_cnt2; - uint32_t gds_cs_ctxsw_cnt3; - uint32_t reserved_112; - uint32_t reserved_113; - uint32_t cp_pq_exe_status_lo; - uint32_t cp_pq_exe_status_hi; - uint32_t cp_packet_id_lo; - uint32_t cp_packet_id_hi; - uint32_t cp_packet_exe_status_lo; - uint32_t cp_packet_exe_status_hi; - uint32_t gds_save_base_addr_lo; - uint32_t gds_save_base_addr_hi; - uint32_t gds_save_mask_lo; - uint32_t gds_save_mask_hi; - uint32_t ctx_save_base_addr_lo; - uint32_t ctx_save_base_addr_hi; - uint32_t reserved_126; - uint32_t reserved_127; - uint32_t cp_mqd_base_addr_lo; - uint32_t cp_mqd_base_addr_hi; - uint32_t cp_hqd_active; - uint32_t cp_hqd_vmid; - uint32_t cp_hqd_persistent_state; - uint32_t cp_hqd_pipe_priority; - uint32_t cp_hqd_queue_priority; - uint32_t cp_hqd_quantum; - uint32_t cp_hqd_pq_base_lo; - uint32_t cp_hqd_pq_base_hi; - uint32_t cp_hqd_pq_rptr; - uint32_t cp_hqd_pq_rptr_report_addr_lo; - uint32_t cp_hqd_pq_rptr_report_addr_hi; - uint32_t cp_hqd_pq_wptr_poll_addr_lo; - uint32_t cp_hqd_pq_wptr_poll_addr_hi; - uint32_t cp_hqd_pq_doorbell_control; - uint32_t cp_hqd_pq_wptr; - uint32_t cp_hqd_pq_control; - uint32_t cp_hqd_ib_base_addr_lo; - uint32_t cp_hqd_ib_base_addr_hi; - uint32_t cp_hqd_ib_rptr; - uint32_t cp_hqd_ib_control; - uint32_t cp_hqd_iq_timer; - uint32_t cp_hqd_iq_rptr; - uint32_t cp_hqd_dequeue_request; - uint32_t cp_hqd_dma_offload; - uint32_t cp_hqd_sema_cmd; - uint32_t cp_hqd_msg_type; - uint32_t cp_hqd_atomic0_preop_lo; - uint32_t cp_hqd_atomic0_preop_hi; - uint32_t cp_hqd_atomic1_preop_lo; - uint32_t cp_hqd_atomic1_preop_hi; - uint32_t cp_hqd_hq_status0; - uint32_t cp_hqd_hq_control0; - uint32_t cp_mqd_control; - uint32_t cp_hqd_hq_status1; - uint32_t cp_hqd_hq_control1; - uint32_t cp_hqd_eop_base_addr_lo; - uint32_t cp_hqd_eop_base_addr_hi; - uint32_t cp_hqd_eop_control; - uint32_t cp_hqd_eop_rptr; - uint32_t cp_hqd_eop_wptr; - uint32_t cp_hqd_eop_done_events; - uint32_t cp_hqd_ctx_save_base_addr_lo; - uint32_t cp_hqd_ctx_save_base_addr_hi; - uint32_t cp_hqd_ctx_save_control; - uint32_t cp_hqd_cntl_stack_offset; - uint32_t cp_hqd_cntl_stack_size; - uint32_t cp_hqd_wg_state_offset; - uint32_t cp_hqd_ctx_save_size; - uint32_t cp_hqd_gds_resource_state; - uint32_t cp_hqd_error; - uint32_t cp_hqd_eop_wptr_mem; - uint32_t cp_hqd_eop_dones; - uint32_t reserved_182; - uint32_t reserved_183; - uint32_t reserved_184; - uint32_t reserved_185; - uint32_t reserved_186; - uint32_t reserved_187; - uint32_t reserved_188; - uint32_t reserved_189; - uint32_t reserved_190; - uint32_t reserved_191; - uint32_t iqtimer_pkt_header; - uint32_t iqtimer_pkt_dw0; - uint32_t iqtimer_pkt_dw1; - uint32_t iqtimer_pkt_dw2; - uint32_t iqtimer_pkt_dw3; - uint32_t iqtimer_pkt_dw4; - uint32_t iqtimer_pkt_dw5; - uint32_t iqtimer_pkt_dw6; - uint32_t iqtimer_pkt_dw7; - uint32_t iqtimer_pkt_dw8; - uint32_t iqtimer_pkt_dw9; - uint32_t iqtimer_pkt_dw10; - uint32_t iqtimer_pkt_dw11; - uint32_t iqtimer_pkt_dw12; - uint32_t iqtimer_pkt_dw13; - uint32_t iqtimer_pkt_dw14; - uint32_t iqtimer_pkt_dw15; - uint32_t iqtimer_pkt_dw16; - uint32_t iqtimer_pkt_dw17; - uint32_t iqtimer_pkt_dw18; - uint32_t iqtimer_pkt_dw19; - uint32_t 
iqtimer_pkt_dw20; - uint32_t iqtimer_pkt_dw21; - uint32_t iqtimer_pkt_dw22; - uint32_t iqtimer_pkt_dw23; - uint32_t iqtimer_pkt_dw24; - uint32_t iqtimer_pkt_dw25; - uint32_t iqtimer_pkt_dw26; - uint32_t iqtimer_pkt_dw27; - uint32_t iqtimer_pkt_dw28; - uint32_t iqtimer_pkt_dw29; - uint32_t iqtimer_pkt_dw30; - uint32_t iqtimer_pkt_dw31; - uint32_t reserved_225; - uint32_t reserved_226; - uint32_t reserved_227; - uint32_t set_resources_header; - uint32_t set_resources_dw1; - uint32_t set_resources_dw2; - uint32_t set_resources_dw3; - uint32_t set_resources_dw4; - uint32_t set_resources_dw5; - uint32_t set_resources_dw6; - uint32_t set_resources_dw7; - uint32_t reserved_236; - uint32_t reserved_237; - uint32_t reserved_238; - uint32_t reserved_239; - uint32_t queue_doorbell_id0; - uint32_t queue_doorbell_id1; - uint32_t queue_doorbell_id2; - uint32_t queue_doorbell_id3; - uint32_t queue_doorbell_id4; - uint32_t queue_doorbell_id5; - uint32_t queue_doorbell_id6; - uint32_t queue_doorbell_id7; - uint32_t queue_doorbell_id8; - uint32_t queue_doorbell_id9; - uint32_t queue_doorbell_id10; - uint32_t queue_doorbell_id11; - uint32_t queue_doorbell_id12; - uint32_t queue_doorbell_id13; - uint32_t queue_doorbell_id14; - uint32_t queue_doorbell_id15; -}; - struct vi_ce_ib_state { uint32_t ce_ib_completion_status; uint32_t ce_constegnine_count; From 29696bd680f196f52c6d5eedc4da4ef0482d8bb9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 18 Aug 2017 23:36:08 -0400 Subject: [PATCH 036/232] drm/amdgpu/gfx9: update mqd to include dynamic CU mask Necessary for proper operation with KIQ. Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/v9_structs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index 9a9e6c7e89eae..56d79db316932 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -284,8 +284,8 @@ struct v9_mqd { uint32_t gds_save_mask_hi; uint32_t ctx_save_base_addr_lo; uint32_t ctx_save_base_addr_hi; - uint32_t reserved_126; - uint32_t reserved_127; + uint32_t dynamic_cu_mask_addr_lo; + uint32_t dynamic_cu_mask_addr_hi; uint32_t cp_mqd_base_addr_lo; uint32_t cp_mqd_base_addr_hi; uint32_t cp_hqd_active; From ffe6d881e99413f3ebdfebf99570c2e840879e68 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 18 Aug 2017 23:52:37 -0400 Subject: [PATCH 037/232] drm/amdgpu/gfx9: adjust mqd allocation size To allocate additional space for the dynamic cu masks. Confirmed with the hw team that we only need 1 dword for the mask. The mask is the same for each SE so you only need 1 dword. 
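As a sketch of what the gfx_v9_0.c hunk below does (restated here for clarity; ring->mqd_gpu_addr is the GPU address of the whole v9_mqd_allocation, and the local cu_mask_addr variable is only an illustration, not how the patch spells it):

	/* the extra dwords sit directly behind the MQD in the same BO,
	 * so the MQD only needs the GPU address of the trailing
	 * dynamic_cu_mask dword
	 */
	uint64_t cu_mask_addr = ring->mqd_gpu_addr +
		offsetof(struct v9_mqd_allocation, dynamic_cu_mask);

	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(cu_mask_addr);
	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(cu_mask_addr);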
Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 25 +++++++++++++++++------- drivers/gpu/drm/amd/include/v9_structs.h | 8 ++++++++ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69182eeca264e..7c06d1b99d998 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1346,7 +1346,7 @@ static int gfx_v9_0_sw_init(void *handle) return r; /* create MQD for all compute queues as wel as KIQ for SRIOV case */ - r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd)); + r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation)); if (r) return r; @@ -2463,6 +2463,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_static_thread_mgmt_se3 = 0xffffffff; mqd->compute_misc_reserved = 0x00000003; + mqd->dynamic_cu_mask_addr_lo = + lower_32_bits(ring->mqd_gpu_addr + + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); + mqd->dynamic_cu_mask_addr_hi = + upper_32_bits(ring->mqd_gpu_addr + + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); + eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); @@ -2695,7 +2702,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.in_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; @@ -2707,7 +2714,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } else { - memset((void *)mqd, 0, sizeof(*mqd)); + memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); + ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v9_0_mqd_init(ring); @@ -2716,7 +2725,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); } return 0; @@ -2729,7 +2738,9 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) int mqd_idx = ring - &adev->gfx.compute_ring[0]; if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { - memset((void *)mqd, 0, sizeof(*mqd)); + memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); + ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v9_0_mqd_init(ring); @@ -2737,11 +2748,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); } else if (adev->gfx.in_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, 
adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index 56d79db316932..2fb25abaf7c8b 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -672,6 +672,14 @@ struct v9_mqd { uint32_t reserved_511; }; +struct v9_mqd_allocation { + struct v9_mqd mqd; + uint32_t wptr_poll_mem; + uint32_t rptr_report_mem; + uint32_t dynamic_cu_mask; + uint32_t dynamic_rb_mask; +}; + /* from vega10 all CSA format is shifted to chain ib compatible mode */ struct v9_ce_ib_state { /* section of non chained ib part */ From e154162ef75d9cc444f6107e1f6cca5efe8e9640 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 25 Aug 2017 15:51:03 +0800 Subject: [PATCH 038/232] drm/amd/powerplay: refine pp code for raven delete useless code. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 135 +++--------------- 1 file changed, 16 insertions(+), 119 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index edc5fb6412d95..3e443c11f47b9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -38,20 +38,17 @@ #include "pp_soc15.h" #define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define RAVEN_MINIMUM_ENGINE_CLOCK 800 //8Mhz, the low boundary of engine clock allowed on this chip +#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ #define SCLK_MIN_DIV_INTV_SHIFT 12 -#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 //100mhz +#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ #define SMC_RAM_END 0x40000 static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic; + + int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock_req); -struct phm_vq_budgeting_record rv_vqtable[] = { - /* _TBD - * CUs, SSP low, SSP High, Min Sclk Low, Min Sclk, High, AWD/non-AWD, DCLK, ECLK, Sustainable Sclk, Sustainable CUs */ - { 8, 0, 45, 0, 0, VQ_DisplayConfig_NoneAWD, 80000, 120000, 4, 0 }, -}; static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps) { @@ -70,101 +67,24 @@ static const struct rv_power_state *cast_const_rv_ps( return (struct rv_power_state *)hw_ps; } -static int rv_init_vq_budget_table(struct pp_hwmgr *hwmgr) -{ - uint32_t table_size, i; - struct phm_vq_budgeting_table *ptable; - uint32_t num_entries = ARRAY_SIZE(rv_vqtable); - - if (hwmgr->dyn_state.vq_budgeting_table != NULL) - return 0; - - table_size = sizeof(struct phm_vq_budgeting_table) + - sizeof(struct phm_vq_budgeting_record) * (num_entries - 1); - - ptable = kzalloc(table_size, GFP_KERNEL); - if (NULL == ptable) - return -ENOMEM; - - ptable->numEntries = (uint8_t) num_entries; - - for (i = 0; i < ptable->numEntries; i++) { - ptable->entries[i].ulCUs = rv_vqtable[i].ulCUs; - ptable->entries[i].ulSustainableSOCPowerLimitLow = rv_vqtable[i].ulSustainableSOCPowerLimitLow; - ptable->entries[i].ulSustainableSOCPowerLimitHigh = rv_vqtable[i].ulSustainableSOCPowerLimitHigh; - ptable->entries[i].ulMinSclkLow = rv_vqtable[i].ulMinSclkLow; - ptable->entries[i].ulMinSclkHigh = rv_vqtable[i].ulMinSclkHigh; - ptable->entries[i].ucDispConfig = rv_vqtable[i].ucDispConfig; - ptable->entries[i].ulDClk = 
rv_vqtable[i].ulDClk; - ptable->entries[i].ulEClk = rv_vqtable[i].ulEClk; - ptable->entries[i].ulSustainableSclk = rv_vqtable[i].ulSustainableSclk; - ptable->entries[i].ulSustainableCUs = rv_vqtable[i].ulSustainableCUs; - } - - hwmgr->dyn_state.vq_budgeting_table = ptable; - - return 0; -} - static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend); - struct cgs_system_info sys_info = {0}; - int result; - rv_hwmgr->ddi_power_gating_disabled = 0; - rv_hwmgr->bapm_enabled = 1; rv_hwmgr->dce_slow_sclk_threshold = 30000; - rv_hwmgr->disable_driver_thermal_policy = 1; rv_hwmgr->thermal_auto_throttling_treshold = 0; rv_hwmgr->is_nb_dpm_enabled = 1; rv_hwmgr->dpm_flags = 1; - rv_hwmgr->disable_smu_acp_s3_handshake = 1; - rv_hwmgr->disable_notify_smu_vpu_recovery = 0; rv_hwmgr->gfx_off_controled_by_driver = false; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicM3Arbiter); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDynamicPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ACP); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXDynamicMGPowerGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkThrottleLowNotification); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableVoltageIsland); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (!result) { - if (sys_info.value & AMD_PG_SUPPORT_GFX_DMG) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_GFXDynamicMGPowerGating); - } - + PHM_PlatformCaps_PowerPlaySupport); return 0; } @@ -256,14 +176,6 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk, clocks.dcefClockInSR / 100); - /* - if(!rv_data->isp_tileA_power_gated || !rv_data->isp_tileB_power_gated) { - if ((hwmgr->ispArbiter.iclk != 0) && (rv_data->ISPActualHardMinFreq != (hwmgr->ispArbiter.iclk / 100) )) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetHardMinIspclkByFreq, hwmgr->ispArbiter.iclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->ISPActualHardMinFreq), - } - } */ if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) || ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { @@ -279,7 +191,7 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetHardMinSocclkByFreq, hwmgr->gfx_arbiter.sclk_hard_min / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq); + rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq); } if ((hwmgr->gfx_arbiter.gfxclk != 0) && @@ -301,6 +213,7 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void 
*input, return 0; } + static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { @@ -313,6 +226,7 @@ static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetDisplayCount, num_of_active_displays); + return 0; } @@ -563,9 +477,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return result; } - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerPlaySupport); - rv_populate_clock_table(hwmgr); result = rv_get_system_info_data(hwmgr); @@ -624,8 +535,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - rv_init_vq_budget_table(hwmgr); - return result; } @@ -670,11 +579,6 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; } - if (NULL != hwmgr->dyn_state.vq_budgeting_table) { - kfree(hwmgr->dyn_state.vq_budgeting_table); - hwmgr->dyn_state.vq_budgeting_table = NULL; - } - kfree(hwmgr->backend); hwmgr->backend = NULL; @@ -711,18 +615,9 @@ static int rv_dpm_get_pp_table_entry_callback( { struct rv_power_state *rv_ps = cast_rv_ps(hw_ps); - const ATOM_PPLIB_CZ_CLOCK_INFO *rv_clock_info = clock_info; - - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - uint8_t clock_info_index = rv_clock_info->index; - - if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1)) - clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1); - - rv_ps->levels[index].engine_clock = table->entries[clock_info_index].clk; - rv_ps->levels[index].vddc_index = (uint8_t)table->entries[clock_info_index].v; + rv_ps->levels[index].engine_clock = 0; + rv_ps->levels[index].vddc_index = 0; rv_ps->level = index + 1; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { @@ -814,12 +709,12 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_rv_ps(state); level_index = index > ps->level - 1 ? 
ps->level - 1 : index; - level->coreClock = ps->levels[level_index].engine_clock; + level->coreClock = 30000; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) { - level->coreClock = ps->levels[i].engine_clock; + level->coreClock = 30000; break; } } @@ -829,8 +724,9 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1; level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk; - } else + } else { level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk; + } level->nonLocalMemoryFreq = 0; level->nonLocalMemoryWidth = 0; @@ -1001,7 +897,8 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { - return -EINVAL; + clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */ + return 0; } static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr) From 3b4ca9e6494d19b152d1833fede533835e7b7c18 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 24 Aug 2017 13:29:52 +0800 Subject: [PATCH 039/232] drm/amd/powerplay: add dummy pp table for raven. (v2) As there is no PPTable in RV, it is difficult to cleanly decouple PPTABLE functionality in existing codes. v2: agd: squash in clean build fix Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/processpptables.c | 61 +++++++++++++++---- 1 file changed, 49 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 2716721e54535..0f61e670da321 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -24,7 +24,7 @@ #include #include #include - +#include #include "processpptables.h" #include #include @@ -790,6 +790,39 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( return pstate; } +static unsigned char soft_dummy_pp_table[] = { + 0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x18, 0x05, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x8e, 0x01, 0x00, 0x00, 0xb8, 0x01, 0x00, 0x00, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c, + 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x70, 0x00, 0x91, 0xf4, 0x00, + 0x64, 0x00, 0x40, 0x19, 
0x01, 0x5a, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a, + 0x00, 0x00, 0x09, 0x30, 0x75, 0x00, 0x30, 0x75, 0x00, 0x40, 0x9c, 0x00, 0x40, 0x9c, 0x00, 0x59, + 0xd8, 0x00, 0x59, 0xd8, 0x00, 0x91, 0xf4, 0x00, 0x91, 0xf4, 0x00, 0x0e, 0x28, 0x01, 0x0e, 0x28, + 0x01, 0x90, 0x5f, 0x01, 0x90, 0x5f, 0x01, 0x00, 0x77, 0x01, 0x00, 0x77, 0x01, 0xca, 0x91, 0x01, + 0xca, 0x91, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, + 0x7c, 0x00, 0x02, 0x70, 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, + 0x00, 0x07, 0x08, 0x08, 0x00, 0x08, 0x00, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, + 0x02, 0x04, 0x02, 0x00, 0x08, 0x40, 0x9c, 0x00, 0x30, 0x75, 0x00, 0x74, 0xb5, 0x00, 0xa0, 0x8c, + 0x00, 0x60, 0xea, 0x00, 0x74, 0xb5, 0x00, 0x0e, 0x28, 0x01, 0x60, 0xea, 0x00, 0x90, 0x5f, 0x01, + 0x40, 0x19, 0x01, 0xb2, 0xb0, 0x01, 0x90, 0x5f, 0x01, 0xc0, 0xd4, 0x01, 0x00, 0x77, 0x01, 0x5e, + 0xff, 0x01, 0xca, 0x91, 0x01, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, 0x7c, 0x00, 0x02, 0x70, + 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, 0x00, 0x07, 0x00, 0x08, + 0x80, 0x00, 0x30, 0x75, 0x00, 0x7e, 0x00, 0x40, 0x9c, 0x00, 0x7c, 0x00, 0x59, 0xd8, 0x00, 0x70, + 0x00, 0xdc, 0x0b, 0x01, 0x64, 0x00, 0x80, 0x38, 0x01, 0x5a, 0x00, 0x80, 0x38, 0x01, 0x52, 0x00, + 0x80, 0x38, 0x01, 0x4a, 0x00, 0x80, 0x38, 0x01, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c, + 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x74, 0x00, 0x91, 0xf4, 0x00, + 0x66, 0x00, 0x40, 0x19, 0x01, 0x58, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a, + 0x00 +}; static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( struct pp_hwmgr *hwmgr) @@ -799,12 +832,17 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( uint16_t size; if (!table_addr) { - table_addr = cgs_atom_get_data_table(hwmgr->device, - GetIndexIntoMasterTable(DATA, PowerPlayInfo), - &size, &frev, &crev); - - hwmgr->soft_pp_table = table_addr; - hwmgr->soft_pp_table_size = size; + if (hwmgr->chip_id == CHIP_RAVEN) { + table_addr = &soft_dummy_pp_table[0]; + hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; + hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); + } else { + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); + hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table_size = size; + } } return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; @@ -924,15 +962,14 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr, } } - if ((0 == result) && - (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) - result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware)); + if ((0 == result) && (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) { + if (hwmgr->chip_family < AMDGPU_FAMILY_RV) + result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware)); + } return result; } - - static int init_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table From 841e3be124b58c6e86fe288bce4dfe58d5574fd3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 25 Aug 2017 16:58:10 +0800 Subject: [PATCH 040/232] drm/amd/powerplay: notify smu once display changed on Rv. when User turn off display or screen idle timeout, smu need this message to start S0i2 entry. 
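In other words: when the user turns displays off or the screen idles out, the SMU needs the updated display information before it can start S0i2 entry. A condensed sketch of the flow this patch adds is below (the function name is made up for illustration; the config fields and hwmgr callbacks are the ones introduced in the diff that follows): phm_store_dal_configuration_data() derives the active display count and the deep-sleep DCEF clock from the DAL display configuration and forwards them through two new hwmgr callbacks, and the Raven implementations cache the last value so the SMU is only messaged when something actually changes.

    /* Condensed sketch; the function name is hypothetical, the fields and
     * callbacks come from the diff below.
     */
    static void notify_smu_of_display_config(struct pp_hwmgr *hwmgr,
                    const struct amd_pp_display_configuration *cfg)
    {
            uint32_t i, active = 0;

            for (i = 0; i < cfg->num_path_including_non_display; i++)
                    if (cfg->displays[i].controller_id != 0)
                            active++;

            if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
                    hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr,
                                    cfg->min_dcef_deep_sleep_set_clk);

            if (hwmgr->hwmgr_func->set_active_display_count)
                    hwmgr->hwmgr_func->set_active_display_count(hwmgr, active);
    }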
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 14 ++++++ .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 43 +++++++++++-------- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h | 4 +- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 + 4 files changed, 43 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index fcc722ea76490..967f50f54384d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -323,6 +323,9 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr, int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, const struct amd_pp_display_configuration *display_config) { + int index = 0; + int number_of_active_display = 0; + PHM_FUNC_CHECK(hwmgr); if (display_config == NULL) @@ -330,6 +333,17 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, hwmgr->display_config = *display_config; + if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk) + hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk); + + for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) { + if (hwmgr->display_config.displays[index].controller_id != 0) + number_of_active_display++; + } + + if (NULL != hwmgr->hwmgr_func->set_active_display_count) + hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display); + if (hwmgr->hwmgr_func->store_cc6_data == NULL) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 3e443c11f47b9..441a916ba3d65 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -76,6 +76,9 @@ static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) rv_hwmgr->is_nb_dpm_enabled = 1; rv_hwmgr->dpm_flags = 1; rv_hwmgr->gfx_off_controled_by_driver = false; + rv_hwmgr->need_min_deep_sleep_dcefclk = true; + rv_hwmgr->num_active_display = 0; + rv_hwmgr->deep_sleep_dcefclk = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); @@ -162,21 +165,12 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, struct pp_display_clock_request clock_req; clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; clock_req.clock_type = amd_pp_dcf_clock; clock_req.clock_freq_in_khz = clocks.dcefClock * 10; - if (clocks.dcefClock == 0 && clocks.dcefClockInSR == 0) - clock_req.clock_freq_in_khz = rv_data->dcf_actual_hard_min_freq; - PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req), "Attempt to set DCF Clock Failed!", return -EINVAL); - if(rv_data->need_min_deep_sleep_dcefclk && 0 != clocks.dcefClockInSR) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetMinDeepSleepDcefclk, - clocks.dcefClockInSR / 100); - if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) || ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100; @@ -213,26 +207,35 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } - -static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int 
rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { - uint32_t num_of_active_displays = 0; - struct cgs_display_info info = {0}; + struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); - cgs_get_active_displays_info(hwmgr->device, &info); - num_of_active_displays = info.display_count; + if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { + rv_data->deep_sleep_dcefclk = clock/100; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetMinDeepSleepDcefclk, + rv_data->deep_sleep_dcefclk); + } + return 0; +} + +static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) +{ + struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + if (rv_data->num_active_display != count) { + rv_data->num_active_display = count; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetDisplayCount, - num_of_active_displays); + rv_data->num_active_display); + } return 0; } static const struct phm_master_table_item rv_set_power_state_list[] = { { NULL, rv_tf_set_clock_limit }, - { NULL, rv_tf_set_num_active_display }, { } }; @@ -955,6 +958,8 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = { .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage, .get_max_high_clocks = rv_get_max_high_clocks, .read_sensor = rv_read_sensor, + .set_active_display_count = rv_set_active_display_count, + .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, }; int rv_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h index 2472b50e54cfb..68d61bd95ca00 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h @@ -293,7 +293,9 @@ struct rv_hwmgr { DpmClocks_t clock_table; uint32_t active_process_mask; - bool need_min_deep_sleep_dcefclk; /* disabled by default */ + bool need_min_deep_sleep_dcefclk; + uint32_t deep_sleep_dcefclk; + uint32_t num_active_display; }; struct pp_hwmgr; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 91b0105e82403..b1a6372608de7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -378,6 +378,8 @@ struct pp_hwmgr_func { struct amd_pp_profile *request); int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); + int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); + int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); }; struct pp_table_func { From 08cab989f77582cb19df12d4a75a91b68b0017f6 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 29 Aug 2017 08:36:52 -0400 Subject: [PATCH 041/232] drm/amd/amdgpu: Add write() method to VRAM debugfs entry (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allows writing data to vram via debugfs. Signed-off-by: Tom St Denis Reviewed-by: Christian König (v2): Call get_user before holding spinlock. 
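For reference, a hypothetical userspace use of the new write path, assuming the usual amdgpu_vram debugfs node under DRI minor 0 (both the node name and minor number depend on the system, and debugfs access normally requires root). As enforced by the kernel side in the diff below, the write size and offset must be 4-byte aligned and stay within VRAM.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint32_t value = 0xcafebabe;    /* dword to poke into VRAM */
            off_t offset = 0x1000;          /* must be 4-byte aligned */
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_WRONLY);

            if (fd < 0) {
                    perror("open amdgpu_vram");
                    return 1;
            }
            if (pwrite(fd, &value, sizeof(value), offset) !=
                (ssize_t)sizeof(value)) {
                    perror("pwrite amdgpu_vram");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }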
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 42 ++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ce384ed9b9efb..6ea96e1fb273a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1671,10 +1671,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, return result; } +static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + if (*pos >= adev->mc.mc_vram_size) + return -ENXIO; + + while (size) { + unsigned long flags; + uint32_t value; + + if (*pos >= adev->mc.mc_vram_size) + return result; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); + WREG32(mmMM_INDEX_HI, *pos >> 31); + WREG32(mmMM_DATA, value); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + static const struct file_operations amdgpu_ttm_vram_fops = { .owner = THIS_MODULE, .read = amdgpu_ttm_vram_read, - .llseek = default_llseek + .write = amdgpu_ttm_vram_write, + .llseek = default_llseek, }; #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS From 3d7d4d3a1b9f67c0caecf2b2aa8d7c347f074a33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 23 Aug 2017 16:13:33 +0200 Subject: [PATCH 042/232] drm/amdgpu: rework moved handling in the VM v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of using the vm_state use a separate flag to note that the BO was moved. 
v2: reorder patches to avoid temporary lockless access Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 ++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 67c37b22f8ef3..1b36c62997b3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1788,10 +1788,16 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, else flags = 0x0; - spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->base.vm_status)) + if (!clear && bo_va->base.moved) { + bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); - spin_unlock(&vm->status_lock); + + } else { + spin_lock(&vm->status_lock); + if (!list_empty(&bo_va->base.vm_status)) + list_splice_init(&bo_va->valids, &bo_va->invalids); + spin_unlock(&vm->status_lock); + } list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, @@ -2419,6 +2425,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; list_for_each_entry(bo_base, &bo->va, bo_list) { + bo_base->moved = true; spin_lock(&bo_base->vm->status_lock); if (list_empty(&bo_base->vm_status)) list_add(&bo_base->vm_status, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9347d28c3c1e5..1b478e62a948a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -105,6 +105,9 @@ struct amdgpu_vm_bo_base { /* protected by spinlock */ struct list_head vm_status; + + /* protected by the BO being reserved */ + bool moved; }; struct amdgpu_vm_pt { From cb7b6ec2f8b8759b6b5beb4d17ea6984867a3296 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 15 Aug 2017 17:08:12 +0200 Subject: [PATCH 043/232] drm/amdgpu: add bo_va cleared flag again v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We changed this to use an extra list a while back, but for the next series I need a separate flag again. 
v2: reorder to avoid unlocked list access Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 35 ++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 -- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a288fa6d72c80..e613ba42f167e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -55,6 +55,9 @@ struct amdgpu_bo_va { /* mappings for this bo_va */ struct list_head invalids; struct list_head valids; + + /* If the mappings are cleared or filled */ + bool cleared; }; struct amdgpu_bo { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1b36c62997b3a..1334bbb82634b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1792,11 +1792,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); - } else { - spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->base.vm_status)) - list_splice_init(&bo_va->valids, &bo_va->invalids); - spin_unlock(&vm->status_lock); + } else if (bo_va->cleared != clear) { + list_splice_init(&bo_va->valids, &bo_va->invalids); } list_for_each_entry(mapping, &bo_va->invalids, list) { @@ -1807,25 +1804,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, return r; } - if (trace_amdgpu_vm_bo_mapping_enabled()) { - list_for_each_entry(mapping, &bo_va->valids, list) - trace_amdgpu_vm_bo_mapping(mapping); - - list_for_each_entry(mapping, &bo_va->invalids, list) - trace_amdgpu_vm_bo_mapping(mapping); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); } spin_lock(&vm->status_lock); - list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->base.vm_status); - if (clear) - list_add(&bo_va->base.vm_status, &vm->cleared); spin_unlock(&vm->status_lock); - if (vm->use_cpu_for_update) { - /* Flush HDP */ - mb(); - amdgpu_gart_flush_gpu_tlb(adev, 0); + list_splice_init(&bo_va->invalids, &bo_va->valids); + bo_va->cleared = clear; + + if (trace_amdgpu_vm_bo_mapping_enabled()) { + list_for_each_entry(mapping, &bo_va->valids, list) + trace_amdgpu_vm_bo_mapping(mapping); } return 0; @@ -2427,9 +2421,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, list_for_each_entry(bo_base, &bo->va, bo_list) { bo_base->moved = true; spin_lock(&bo_base->vm->status_lock); - if (list_empty(&bo_base->vm_status)) - list_add(&bo_base->vm_status, - &bo_base->vm->moved); + list_move(&bo_base->vm_status, &bo_base->vm->moved); spin_unlock(&bo_base->vm->status_lock); } } @@ -2516,7 +2508,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->moved); - INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 1b478e62a948a..ff093d4b5e11a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,9 +129,6 @@ struct amdgpu_vm { /* BOs moved, but not yet updated in the PT */ struct list_head moved; - /* BOs cleared in the PT because of a move */ - struct list_head cleared; - /* 
BO mappings freed, but not yet updated in the PT */ struct list_head freed; From 00b5cc83c443dcd351cb2b21055656e007992b54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 28 Aug 2017 14:46:40 +0200 Subject: [PATCH 044/232] drm/amdgpu: fix comment on amdgpu_bo_va MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Except for the reference count all other members are protected by the VM PD being reserved. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index e613ba42f167e..42492e63b3a29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -49,9 +49,11 @@ struct amdgpu_bo_va { struct amdgpu_vm_bo_base base; /* protected by bo being reserved */ - struct dma_fence *last_pt_update; unsigned ref_count; + /* all other members protected by the VM PD being reserved */ + struct dma_fence *last_pt_update; + /* mappings for this bo_va */ struct list_head invalids; struct list_head valids; From 3f3333f8a0e90ac26f84ed7b0aa344efce695c08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 3 Aug 2017 14:02:13 +0200 Subject: [PATCH 045/232] drm/amdgpu: track evicted page tables v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of validating all page tables when one was evicted, track which one needs a validation. v2: simplify amdgpu_vm_ready as well Signed-off-by: Christian König Reviewed-by: Alex Deucher (v1) Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 227 ++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 16 +- 5 files changed, 119 insertions(+), 141 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d6ddd5562c16b..8bf178a912f22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -636,9 +636,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, p->bytes_moved_vis); - fpriv->vm.last_eviction_counter = - atomic64_read(&p->adev->num_evictions); - if (p->bo_list) { struct amdgpu_bo *gds = p->bo_list->gds_obj; struct amdgpu_bo *gws = p->bo_list->gws_obj; @@ -835,7 +832,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (!bo) continue; - amdgpu_vm_bo_invalidate(adev, bo); + amdgpu_vm_bo_invalidate(adev, bo, false); } } @@ -860,7 +857,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, } if (p->job->vm) { - p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo); + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo); r = amdgpu_bo_vm_update_pte(p); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ba012933e6aaa..d02880640ee7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -160,7 +160,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, if (bo_va && --bo_va->ref_count == 0) { amdgpu_vm_bo_rmv(adev, bo_va); - if (amdgpu_vm_ready(adev, vm)) { + if 
(amdgpu_vm_ready(vm)) { struct dma_fence *fence = NULL; r = amdgpu_vm_clear_freed(adev, vm, &fence); @@ -481,10 +481,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct list_head *list, uint32_t operation) { - int r = -ERESTARTSYS; + int r; - if (!amdgpu_vm_ready(adev, vm)) - goto error; + if (!amdgpu_vm_ready(vm)) + return; r = amdgpu_vm_update_directories(adev, vm); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e495da0bb03c..52d0109c0d9c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -929,7 +929,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, return; abo = container_of(bo, struct amdgpu_bo, tbo); - amdgpu_vm_bo_invalidate(adev, abo); + amdgpu_vm_bo_invalidate(adev, abo, evict); amdgpu_bo_kunmap(abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1334bbb82634b..6ff3c1bf035e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -140,7 +140,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry) { - entry->robj = vm->root.bo; + entry->robj = vm->root.base.bo; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; @@ -148,61 +148,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } -/** - * amdgpu_vm_validate_layer - validate a single page table level - * - * @parent: parent page table level - * @validate: callback to do the validation - * @param: parameter for the validation callback - * - * Validate the page table BOs on command submission if neccessary. - */ -static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, - int (*validate)(void *, struct amdgpu_bo *), - void *param, bool use_cpu_for_update, - struct ttm_bo_global *glob) -{ - unsigned i; - int r; - - if (use_cpu_for_update) { - r = amdgpu_bo_kmap(parent->bo, NULL); - if (r) - return r; - } - - if (!parent->entries) - return 0; - - for (i = 0; i <= parent->last_entry_used; ++i) { - struct amdgpu_vm_pt *entry = &parent->entries[i]; - - if (!entry->bo) - continue; - - r = validate(param, entry->bo); - if (r) - return r; - - spin_lock(&glob->lru_lock); - ttm_bo_move_to_lru_tail(&entry->bo->tbo); - if (entry->bo->shadow) - ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo); - spin_unlock(&glob->lru_lock); - - /* - * Recurse into the sub directory. This is harmless because we - * have only a maximum of 5 layers. - */ - r = amdgpu_vm_validate_level(entry, validate, param, - use_cpu_for_update, glob); - if (r) - return r; - } - - return r; -} - /** * amdgpu_vm_validate_pt_bos - validate the page table BOs * @@ -217,32 +162,43 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { - uint64_t num_evictions; + struct ttm_bo_global *glob = adev->mman.bdev.glob; + int r; - /* We only need to validate the page tables - * if they aren't already valid. 
- */ - num_evictions = atomic64_read(&adev->num_evictions); - if (num_evictions == vm->last_eviction_counter) - return 0; + spin_lock(&vm->status_lock); + while (!list_empty(&vm->evicted)) { + struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_bo *bo; - return amdgpu_vm_validate_level(&vm->root, validate, param, - vm->use_cpu_for_update, - adev->mman.bdev.glob); -} + bo_base = list_first_entry(&vm->evicted, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); -/** - * amdgpu_vm_check - helper for amdgpu_vm_ready - */ -static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) -{ - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - return -ERESTARTSYS; + bo = bo_base->bo; + BUG_ON(!bo); + if (bo->parent) { + r = validate(param, bo); + if (r) + return r; - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - return -ERESTARTSYS; + spin_lock(&glob->lru_lock); + ttm_bo_move_to_lru_tail(&bo->tbo); + if (bo->shadow) + ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + spin_unlock(&glob->lru_lock); + } + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(bo, NULL); + if (r) + return r; + } + + spin_lock(&vm->status_lock); + list_del_init(&bo_base->vm_status); + } + spin_unlock(&vm->status_lock); return 0; } @@ -250,17 +206,19 @@ static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo) /** * amdgpu_vm_ready - check VM is ready for updates * - * @adev: amdgpu device * @vm: VM to check * * Check if all VM PDs/PTs are ready for updates */ -bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm) +bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - if (amdgpu_vm_check(NULL, vm->root.bo)) - return false; + bool ready; + + spin_lock(&vm->status_lock); + ready = list_empty(&vm->evicted); + spin_unlock(&vm->status_lock); - return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_vm_check, NULL); + return ready; } /** @@ -326,11 +284,11 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { - struct reservation_object *resv = vm->root.bo->tbo.resv; + struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; struct amdgpu_bo *pt; - if (!entry->bo) { + if (!entry->base.bo) { r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, level), AMDGPU_GPU_PAGE_SIZE, true, @@ -351,9 +309,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. 
*/ - pt->parent = amdgpu_bo_ref(vm->root.bo); + pt->parent = amdgpu_bo_ref(vm->root.base.bo); - entry->bo = pt; + entry->base.vm = vm; + entry->base.bo = pt; + list_add_tail(&entry->base.bo_list, &pt->va); + INIT_LIST_HEAD(&entry->base.vm_status); entry->addr = 0; } @@ -1020,7 +981,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner); + amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1059,10 +1020,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; - shadow = parent->bo->shadow; + shadow = parent->base.bo->shadow; if (vm->use_cpu_for_update) { - pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo); r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; @@ -1078,7 +1039,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* assume the worst case */ ndw += parent->last_entry_used * 6; - pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo); if (shadow) { shadow_addr = amdgpu_bo_gpu_offset(shadow); @@ -1098,7 +1059,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* walk over the address space and update the directory */ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_bo *bo = parent->entries[pt_idx].bo; + struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo; uint64_t pde, pt; if (bo == NULL) @@ -1141,7 +1102,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, } if (count) { - if (vm->root.bo->shadow) + if (vm->root.base.bo->shadow) params.func(¶ms, last_shadow, last_pt, count, incr, AMDGPU_PTE_VALID); @@ -1154,7 +1115,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, amdgpu_job_free(job); } else { amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, + amdgpu_sync_resv(adev, &job->sync, + parent->base.bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); if (shadow) amdgpu_sync_resv(adev, &job->sync, @@ -1167,7 +1129,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_bo_fence(parent->bo, fence, true); + amdgpu_bo_fence(parent->base.bo, fence, true); dma_fence_put(vm->last_dir_update); vm->last_dir_update = dma_fence_get(fence); dma_fence_put(fence); @@ -1180,7 +1142,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - if (!entry->bo) + if (!entry->base.bo) continue; r = amdgpu_vm_update_level(adev, vm, entry, level + 1); @@ -1213,7 +1175,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - if (!entry->bo) + if (!entry->base.bo) continue; entry->addr = ~0ULL; @@ -1268,7 +1230,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, *entry = &p->vm->root; while ((*entry)->entries) { idx = addr >> (p->adev->vm_manager.block_size * level--); - idx %= amdgpu_bo_size((*entry)->bo) / 8; + idx %= amdgpu_bo_size((*entry)->base.bo) / 8; *parent = *entry; *entry = &(*entry)->entries[idx]; } @@ -1304,7 +1266,7 @@ static void 
amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, p->src || !(flags & AMDGPU_PTE_VALID)) { - dst = amdgpu_bo_gpu_offset(entry->bo); + dst = amdgpu_bo_gpu_offset(entry->base.bo); dst = amdgpu_gart_get_vm_pde(p->adev, dst); flags = AMDGPU_PTE_VALID; } else { @@ -1330,18 +1292,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, tmp = p->pages_addr; p->pages_addr = NULL; - pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); p->pages_addr = tmp; } else { - if (parent->bo->shadow) { - pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); + if (parent->base.bo->shadow) { + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); } - pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pd_addr = amdgpu_bo_gpu_offset(parent->base.bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); } @@ -1392,7 +1354,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if (entry->addr & AMDGPU_PDE_PTE) continue; - pt = entry->bo; + pt = entry->base.bo; if (use_cpu_update) { pe_start = (unsigned long)amdgpu_bo_kptr(pt); } else { @@ -1612,12 +1574,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv, + r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, owner); if (r) goto error_free; - r = reservation_object_reserve_shared(vm->root.bo->tbo.resv); + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); if (r) goto error_free; @@ -1632,7 +1594,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_bo_fence(vm->root.bo, f, true); + amdgpu_bo_fence(vm->root.base.bo, f, true); dma_fence_put(*fence); *fence = f; return 0; @@ -1927,7 +1889,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, */ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct reservation_object *resv = vm->root.bo->tbo.resv; + struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct dma_fence *excl, **shared; unsigned i, shared_count; int r; @@ -2414,12 +2376,25 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, * Mark @bo as invalid. 
*/ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo) + struct amdgpu_bo *bo, bool evicted) { struct amdgpu_vm_bo_base *bo_base; list_for_each_entry(bo_base, &bo->va, bo_list) { + struct amdgpu_vm *vm = bo_base->vm; + bo_base->moved = true; + if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + spin_lock(&bo_base->vm->status_lock); + list_move(&bo_base->vm_status, &vm->evicted); + spin_unlock(&bo_base->vm->status_lock); + continue; + } + + /* Don't add page tables to the moved state */ + if (bo->tbo.type == ttm_bo_type_kernel) + continue; + spin_lock(&bo_base->vm->status_lock); list_move(&bo_base->vm_status, &bo_base->vm->moved); spin_unlock(&bo_base->vm->status_lock); @@ -2507,6 +2482,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); + INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->freed); @@ -2551,30 +2527,31 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, init_pde_value, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.base.bo); if (r) goto error_free_sched_entity; - r = amdgpu_bo_reserve(vm->root.bo, false); - if (r) - goto error_free_root; - - vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + vm->root.base.vm = vm; + list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); + INIT_LIST_HEAD(&vm->root.base.vm_status); if (vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(vm->root.bo, NULL); + r = amdgpu_bo_reserve(vm->root.base.bo, false); if (r) goto error_free_root; - } - amdgpu_bo_unreserve(vm->root.bo); + r = amdgpu_bo_kmap(vm->root.base.bo, NULL); + if (r) + goto error_free_root; + amdgpu_bo_unreserve(vm->root.base.bo); + } return 0; error_free_root: - amdgpu_bo_unref(&vm->root.bo->shadow); - amdgpu_bo_unref(&vm->root.bo); - vm->root.bo = NULL; + amdgpu_bo_unref(&vm->root.base.bo->shadow); + amdgpu_bo_unref(&vm->root.base.bo); + vm->root.base.bo = NULL; error_free_sched_entity: amd_sched_entity_fini(&ring->sched, &vm->entity); @@ -2593,9 +2570,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level) { unsigned i; - if (level->bo) { - amdgpu_bo_unref(&level->bo->shadow); - amdgpu_bo_unref(&level->bo); + if (level->base.bo) { + list_del(&level->base.bo_list); + list_del(&level->base.vm_status); + amdgpu_bo_unref(&level->base.bo->shadow); + amdgpu_bo_unref(&level->base.bo); } if (level->entries) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ff093d4b5e11a..4e465e817fe86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,12 +111,12 @@ struct amdgpu_vm_bo_base { }; struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_vm_bo_base base; + uint64_t addr; /* array of page tables, one for each directory entry */ - struct amdgpu_vm_pt *entries; - unsigned last_entry_used; + struct amdgpu_vm_pt *entries; + unsigned last_entry_used; }; struct amdgpu_vm { @@ -126,6 +126,9 @@ struct amdgpu_vm { /* protecting invalidated */ spinlock_t status_lock; + /* BOs who needs a validation */ + struct list_head evicted; + /* BOs moved, but not yet updated in the PT */ struct list_head moved; @@ -135,7 +138,6 @@ struct amdgpu_vm { /* contains the page directory */ struct amdgpu_vm_pt root; struct dma_fence 
*last_dir_update; - uint64_t last_eviction_counter; /* protecting freed */ spinlock_t freed_lock; @@ -225,7 +227,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm); +bool amdgpu_vm_ready(struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); @@ -250,7 +252,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, - struct amdgpu_bo *bo); + struct amdgpu_bo *bo, bool evicted); struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo); struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, From c5927537dd5706b17affa8aeea28c7b19c8fbb42 Mon Sep 17 00:00:00 2001 From: Himanshu Jha Date: Tue, 29 Aug 2017 18:51:27 +0530 Subject: [PATCH 046/232] drm/amd: Remove null check before kfree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kfree on NULL pointer is a no-op and therefore checking is redundant. Reviewed-by: Christian König Signed-off-by: Himanshu Jha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 6 ++---- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8d1cf2d3e663a..f51b41f094ef2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_connector->edid) { - kfree(amdgpu_connector->edid); - amdgpu_connector->edid = NULL; - } + kfree(amdgpu_connector->edid); + amdgpu_connector->edid = NULL; } static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index c49a6f22002f7..f128b03f2327c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -607,10 +607,8 @@ int smu7_init(struct pp_smumgr *smumgr) int smu7_smu_fini(struct pp_smumgr *smumgr) { - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } + kfree(smumgr->backend); + smumgr->backend = NULL; cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } From ebe02de2c60caa3ee5a1b39c7c8b2a40e1fda2d8 Mon Sep 17 00:00:00 2001 From: Himanshu Jha Date: Tue, 29 Aug 2017 18:42:27 +0530 Subject: [PATCH 047/232] drm/amd/powerplay/hwmgr: Remove null check before kfree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kfree on NULL pointer is a no-op and therefore checking is redundant. 
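The same mechanical transformation as the previous patch; a minimal before/after of the pattern, using one of the fields touched in the diff below:

    /* before: redundant NULL check */
    if (NULL != hwmgr->dyn_state.cac_dtp_table) {
            kfree(hwmgr->dyn_state.cac_dtp_table);
            hwmgr->dyn_state.cac_dtp_table = NULL;
    }

    /* after: kfree(NULL) is a no-op, so the guard can go */
    kfree(hwmgr->dyn_state.cac_dtp_table);
    hwmgr->dyn_state.cac_dtp_table = NULL;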
Reviewed-by: Harry Wentland Reviewed-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Himanshu Jha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 6 +- .../drm/amd/powerplay/hwmgr/processpptables.c | 96 +++++++------------ .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 44 +++------ .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 12 +-- 4 files changed, 53 insertions(+), 105 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index bc839ff0bdd04..9f2c0378c0592 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1225,10 +1225,8 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; kfree(hwmgr->backend); hwmgr->backend = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 0f61e670da321..485f7ebdc754b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -1652,85 +1652,53 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_RAVEN) return 0; - if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { - kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); - hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; - } + kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); + hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; - if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - kfree(hwmgr->dyn_state.vddci_dependency_on_mclk); - hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.vddci_dependency_on_mclk); + hwmgr->dyn_state.vddci_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { - kfree(hwmgr->dyn_state.vddc_dependency_on_mclk); - hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.vddc_dependency_on_mclk); + hwmgr->dyn_state.vddc_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { - kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk); - hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; - } + kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk); + hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL; - if (NULL != hwmgr->dyn_state.valid_mclk_values) { - kfree(hwmgr->dyn_state.valid_mclk_values); - hwmgr->dyn_state.valid_mclk_values = NULL; - } + kfree(hwmgr->dyn_state.valid_mclk_values); + hwmgr->dyn_state.valid_mclk_values = NULL; - if (NULL != hwmgr->dyn_state.valid_sclk_values) { - kfree(hwmgr->dyn_state.valid_sclk_values); - hwmgr->dyn_state.valid_sclk_values = NULL; - } + kfree(hwmgr->dyn_state.valid_sclk_values); + hwmgr->dyn_state.valid_sclk_values = NULL; - if (NULL != hwmgr->dyn_state.cac_leakage_table) { - kfree(hwmgr->dyn_state.cac_leakage_table); - hwmgr->dyn_state.cac_leakage_table = NULL; - } + kfree(hwmgr->dyn_state.cac_leakage_table); + hwmgr->dyn_state.cac_leakage_table = NULL; - if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) { - kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table); - hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL; - } + 
kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table); + hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL; - if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table); - hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table); + hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table); - hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table); - hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table); + hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) { - kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table); - hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; - } + kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table); + hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL; - if (NULL != hwmgr->dyn_state.cac_dtp_table) { - kfree(hwmgr->dyn_state.cac_dtp_table); - hwmgr->dyn_state.cac_dtp_table = NULL; - } + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; - if (NULL != hwmgr->dyn_state.ppm_parameter_table) { - kfree(hwmgr->dyn_state.ppm_parameter_table); - hwmgr->dyn_state.ppm_parameter_table = NULL; - } + kfree(hwmgr->dyn_state.ppm_parameter_table); + hwmgr->dyn_state.ppm_parameter_table = NULL; - if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) { - kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); - hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; - } + kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk); + hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL; - if (NULL != hwmgr->dyn_state.vq_budgeting_table) { - kfree(hwmgr->dyn_state.vq_budgeting_table); - hwmgr->dyn_state.vq_budgeting_table = NULL; - } + kfree(hwmgr->dyn_state.vq_budgeting_table); + hwmgr->dyn_state.vq_budgeting_table = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 441a916ba3d65..d5a9c0792de00 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -552,35 +552,21 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - if (pinfo->vdd_dep_on_dcefclk) { - kfree(pinfo->vdd_dep_on_dcefclk); - pinfo->vdd_dep_on_dcefclk = NULL; - } - if (pinfo->vdd_dep_on_socclk) { - kfree(pinfo->vdd_dep_on_socclk); - pinfo->vdd_dep_on_socclk = NULL; - } - if (pinfo->vdd_dep_on_fclk) { - kfree(pinfo->vdd_dep_on_fclk); - pinfo->vdd_dep_on_fclk = NULL; - } - if (pinfo->vdd_dep_on_dispclk) { - kfree(pinfo->vdd_dep_on_dispclk); - pinfo->vdd_dep_on_dispclk = NULL; - } - if (pinfo->vdd_dep_on_dppclk) { - kfree(pinfo->vdd_dep_on_dppclk); - pinfo->vdd_dep_on_dppclk = NULL; - } - if (pinfo->vdd_dep_on_phyclk) { - kfree(pinfo->vdd_dep_on_phyclk); - pinfo->vdd_dep_on_phyclk = NULL; - } - - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - 
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } + kfree(pinfo->vdd_dep_on_dcefclk); + pinfo->vdd_dep_on_dcefclk = NULL; + kfree(pinfo->vdd_dep_on_socclk); + pinfo->vdd_dep_on_socclk = NULL; + kfree(pinfo->vdd_dep_on_fclk); + pinfo->vdd_dep_on_fclk = NULL; + kfree(pinfo->vdd_dep_on_dispclk); + pinfo->vdd_dep_on_dispclk = NULL; + kfree(pinfo->vdd_dep_on_dppclk); + pinfo->vdd_dep_on_dppclk = NULL; + kfree(pinfo->vdd_dep_on_phyclk); + pinfo->vdd_dep_on_phyclk = NULL; + + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; kfree(hwmgr->backend); hwmgr->backend = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c2743233ba10e..eb8a3ff70cf78 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2282,15 +2282,11 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; pp_smu7_thermal_fini(hwmgr); - if (NULL != hwmgr->backend) { - kfree(hwmgr->backend); - hwmgr->backend = NULL; - } + kfree(hwmgr->backend); + hwmgr->backend = NULL; return 0; } From ea09729c930223edf492d0ca647c27e7eb0ccb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 9 Aug 2017 14:15:46 +0200 Subject: [PATCH 048/232] drm/amdgpu: rework page directory filling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keep track off relocated PDs/PTs instead of walking and checking all PDs. 
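In effect, amdgpu_vm_update_directories() now drains the per-VM relocated list and updates only the parent directories of the entries queued there, instead of recursing over every PD level on each call. The pattern, reduced to a plain userspace sketch (hypothetical names, no locking, not the driver code):

#include <stdio.h>
#include <stdbool.h>

struct pt {
        const char *name;
        struct pt *parent;      /* the PD that holds this PT's PDE */
        struct pt *next;        /* link in the relocated list */
        bool queued;
};

static struct pt *relocated;    /* stands in for vm->relocated */

static void mark_relocated(struct pt *pt)
{
        if (!pt->queued) {      /* queue each entry at most once */
                pt->next = relocated;
                relocated = pt;
                pt->queued = true;
        }
}

static void update_directories(void)
{
        /* Touch only the queued entries, no full tree walk. */
        while (relocated) {
                struct pt *pt = relocated;

                relocated = pt->next;
                pt->queued = false;
                if (pt->parent)
                        printf("rewrite PDE of %s in %s\n",
                               pt->name, pt->parent->name);
        }
}

int main(void)
{
        struct pt root = { "root", NULL, NULL, false };
        struct pt pt0  = { "pt0", &root, NULL, false };

        mark_relocated(&pt0);
        mark_relocated(&pt0);   /* no-op, already queued */
        update_directories();
        return 0;
}
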
v2: fix root PD handling Signed-off-by: Christian König Reviewed-by: Alex Deucher (v1) Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 89 +++++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 + 2 files changed, 63 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6ff3c1bf035e1..faa08d5728da2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -196,7 +196,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } spin_lock(&vm->status_lock); - list_del_init(&bo_base->vm_status); + list_move(&bo_base->vm_status, &vm->relocated); } spin_unlock(&vm->status_lock); @@ -314,8 +314,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->base.vm = vm; entry->base.bo = pt; list_add_tail(&entry->base.bo_list, &pt->va); - INIT_LIST_HEAD(&entry->base.vm_status); - entry->addr = 0; + spin_lock(&vm->status_lock); + list_add(&entry->base.vm_status, &vm->relocated); + spin_unlock(&vm->status_lock); + entry->addr = ~0ULL; } if (level < adev->vm_manager.num_level) { @@ -1000,18 +1002,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ static int amdgpu_vm_update_level(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_vm_pt *parent, - unsigned level) + struct amdgpu_vm_pt *parent) { struct amdgpu_bo *shadow; struct amdgpu_ring *ring = NULL; uint64_t pd_addr, shadow_addr = 0; - uint32_t incr = amdgpu_vm_bo_size(adev, level + 1); uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; unsigned count = 0, pt_idx, ndw = 0; struct amdgpu_job *job; struct amdgpu_pte_update_params params; struct dma_fence *fence = NULL; + uint32_t incr; int r; @@ -1059,12 +1060,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, /* walk over the address space and update the directory */ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo; + struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; + struct amdgpu_bo *bo = entry->base.bo; uint64_t pde, pt; if (bo == NULL) continue; + spin_lock(&vm->status_lock); + list_del_init(&entry->base.vm_status); + spin_unlock(&vm->status_lock); + pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); /* Don't update huge pages here */ @@ -1075,6 +1081,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID; pde = pd_addr + pt_idx * 8; + incr = amdgpu_bo_size(bo); if (((last_pde + 8 * count) != pde) || ((last_pt + incr * count) != pt) || (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { @@ -1135,20 +1142,6 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, dma_fence_put(fence); } } - /* - * Recurse into the subdirectories. This recursion is harmless because - * we only have a maximum of 5 layers. - */ - for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { - struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - - if (!entry->base.bo) - continue; - - r = amdgpu_vm_update_level(adev, vm, entry, level + 1); - if (r) - return r; - } return 0; @@ -1164,7 +1157,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, * * Mark all PD level as invalid after an error. 
*/ -static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) +static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm, + struct amdgpu_vm_pt *parent) { unsigned pt_idx; @@ -1179,7 +1173,10 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) continue; entry->addr = ~0ULL; - amdgpu_vm_invalidate_level(entry); + spin_lock(&vm->status_lock); + list_move(&entry->base.vm_status, &vm->relocated); + spin_unlock(&vm->status_lock); + amdgpu_vm_invalidate_level(vm, entry); } } @@ -1197,9 +1194,38 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, { int r; - r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); - if (r) - amdgpu_vm_invalidate_level(&vm->root); + spin_lock(&vm->status_lock); + while (!list_empty(&vm->relocated)) { + struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_bo *bo; + + bo_base = list_first_entry(&vm->relocated, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); + + bo = bo_base->bo->parent; + if (bo) { + struct amdgpu_vm_bo_base *parent; + struct amdgpu_vm_pt *pt; + + parent = list_first_entry(&bo->va, + struct amdgpu_vm_bo_base, + bo_list); + pt = container_of(parent, struct amdgpu_vm_pt, base); + + r = amdgpu_vm_update_level(adev, vm, pt); + if (r) { + amdgpu_vm_invalidate_level(vm, &vm->root); + return r; + } + spin_lock(&vm->status_lock); + } else { + spin_lock(&vm->status_lock); + list_del_init(&bo_base->vm_status); + } + } + spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { /* Flush HDP */ @@ -1601,7 +1627,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, error_free: amdgpu_job_free(job); - amdgpu_vm_invalidate_level(&vm->root); + amdgpu_vm_invalidate_level(vm, &vm->root); return r; } @@ -2391,9 +2417,13 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, continue; } - /* Don't add page tables to the moved state */ - if (bo->tbo.type == ttm_bo_type_kernel) + if (bo->tbo.type == ttm_bo_type_kernel) { + spin_lock(&bo_base->vm->status_lock); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, &vm->relocated); + spin_unlock(&bo_base->vm->status_lock); continue; + } spin_lock(&bo_base->vm->status_lock); list_move(&bo_base->vm_status, &bo_base->vm->moved); @@ -2483,6 +2513,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->evicted); + INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->freed); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4e465e817fe86..c3753afe98530 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,6 +129,9 @@ struct amdgpu_vm { /* BOs who needs a validation */ struct list_head evicted; + /* PT BOs which relocated and their parent need an update */ + struct list_head relocated; + /* BOs moved, but not yet updated in the PT */ struct list_head moved; From 570144c6522f5f332635d20dfa278cfcc764229c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 30 Aug 2017 15:38:45 +0200 Subject: [PATCH 049/232] drm/amdgpu: cleanup the VM code a bit more MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The src isn't used any more after GART hack removal. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index faa08d5728da2..1582feba92891 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1476,7 +1476,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to - * @src: address where to copy page table entries from * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @start: start of mapped range @@ -1490,7 +1489,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, */ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint64_t src, dma_addr_t *pages_addr, struct amdgpu_vm *vm, uint64_t start, uint64_t last, @@ -1508,7 +1506,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; - params.src = src; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -1548,13 +1545,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* one PDE write for each huge page */ ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6; - if (src) { - /* only copy commands needed */ - ndw += ncmds * 7; - - params.func = amdgpu_vm_do_copy_ptes; - - } else if (pages_addr) { + if (pages_addr) { /* copy commands needed */ ndw += ncmds * 7; @@ -1579,7 +1570,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.ib = &job->ibs[0]; - if (!src && pages_addr) { + if (pages_addr) { uint64_t *pte; unsigned i; @@ -1656,7 +1647,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct drm_mm_node *nodes, struct dma_fence **fence) { - uint64_t pfn, src = 0, start = mapping->start; + uint64_t pfn, start = mapping->start; int r; /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here @@ -1711,8 +1702,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, addr += pfn << PAGE_SHIFT; last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, exclusive, - src, pages_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm, start, last, flags, addr, fence); if (r) @@ -1973,7 +1963,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, if (vm->pte_support_ats) init_pte_value = AMDGPU_PTE_SYSTEM; - r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, + r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, mapping->start, mapping->last, init_pte_value, 0, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); From 705e519e0ef1d1824c28ced3b1857a8608853dfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 8 Jun 2017 11:15:16 +0200 Subject: [PATCH 050/232] drm/amdgpu: move hw generation check into amdgpu_doorbell_init v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way we can safely call it on SI as well. 
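Roughly, the generation check moves into the init helper itself, so every caller can invoke it unconditionally; a minimal userspace sketch of that shape (hypothetical names and placeholder values, not the driver code):

#include <stdio.h>
#include <string.h>

struct doorbell {
        unsigned long base;
        unsigned long size;
};

static int doorbell_init(int has_doorbell_bar, struct doorbell *db)
{
        if (!has_doorbell_bar) {                /* e.g. SI generation parts */
                memset(db, 0, sizeof(*db));     /* leave a well-defined, empty state */
                return 0;                       /* nothing to map, but not an error */
        }
        db->base = 0x1000;                      /* placeholder for the real BAR values */
        db->size = 0x800;
        return 0;
}

int main(void)
{
        struct doorbell db;

        doorbell_init(0, &db);                  /* safe to call on any generation */
        printf("base=%#lx size=%#lx\n", db.base, db.size);
        return 0;
}
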
v2: fix type in commit message Signed-off-by: Christian König Reviewed-by: Andy Shevchenko Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1c5c44acaad2f..77a32b79e8f2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -403,6 +403,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev) */ static int amdgpu_doorbell_init(struct amdgpu_device *adev) { + /* No doorbell on SI hardware generation */ + if (adev->asic_type < CHIP_BONAIRE) { + adev->doorbell.base = 0; + adev->doorbell.size = 0; + adev->doorbell.num_doorbells = 0; + adev->doorbell.ptr = NULL; + return 0; + } + /* doorbell bar mapping */ adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); @@ -2075,9 +2084,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - if (adev->asic_type >= CHIP_BONAIRE) - /* doorbell bar mapping */ - amdgpu_doorbell_init(adev); + /* doorbell bar mapping */ + amdgpu_doorbell_init(adev); /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -2304,8 +2312,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rio_mem = NULL; iounmap(adev->rmmio); adev->rmmio = NULL; - if (adev->asic_type >= CHIP_BONAIRE) - amdgpu_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); amdgpu_debugfs_regs_cleanup(adev); } From 0f2fc435d837213202bec3b8e26fbb67a4d6df24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 31 Aug 2017 10:46:20 +0200 Subject: [PATCH 051/232] drm/amdgpu: fix new PD update code for Vega10 v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to refer to the parent instead of the root BO for multi level page tables on Vega10. Also don't set the PDE_PTE bit. v2: Don't set the PDE_PTE bit either. Signed-off-by: Christian König Reviewed-and-Tested-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1582feba92891..d3c48557555ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -309,7 +309,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. */ - pt->parent = amdgpu_bo_ref(vm->root.base.bo); + pt->parent = amdgpu_bo_ref(parent->base.bo); entry->base.vm = vm; entry->base.bo = pt; @@ -317,7 +317,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_add(&entry->base.vm_status, &vm->relocated); spin_unlock(&vm->status_lock); - entry->addr = ~0ULL; + entry->addr = 0; } if (level < adev->vm_manager.num_level) { From 4f5839c56ec38e2f8fcc59ca0e01defa8702987b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 29 Aug 2017 16:07:31 +0200 Subject: [PATCH 052/232] drm/amdgpu: restrict userptr even more MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't allow them to be GEM imported into another process. 
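The check boils down to comparing the address space that backs the userptr BO with that of the process opening it, and refusing the open otherwise. A small userspace illustration of the idea (hypothetical names; the real code compares mm_struct pointers):

#include <stdio.h>
#include <errno.h>

struct bo {
        int userptr_owner;      /* 0 = not a userptr BO; stands in for the mm pointer */
};

static int gem_object_open(const struct bo *bo, int current_pid)
{
        if (bo->userptr_owner && bo->userptr_owner != current_pid)
                return -EPERM;  /* userptr pages belong to one process only */
        return 0;
}

int main(void)
{
        struct bo userptr_bo = { .userptr_owner = 42 };

        printf("same process:  %d\n", gem_object_open(&userptr_bo, 42));        /* 0 */
        printf("other process: %d\n", gem_object_open(&userptr_bo, 7));         /* -EPERM */
        return 0;
}
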
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d02880640ee7d..e32a2b55b54f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -112,7 +112,13 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; + struct mm_struct *mm; int r; + + mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); + if (mm && mm != current->mm) + return -EPERM; + r = amdgpu_bo_reserve(abo, false); if (r) return r; From 73fb16e7ebee12953de32a7a2552e0cf2bf74ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 16 Aug 2017 11:13:48 +0200 Subject: [PATCH 053/232] drm/amdgpu: add support for per VM BOs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Per VM BOs are handled like VM PDs and PTs. They are always valid and don't need to be specified in the BO lists. v2: validate PDs/PTs first Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 79 ++++++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 +- 3 files changed, 60 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 8bf178a912f22..b57adc0723cb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -822,7 +822,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync); + r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d3c48557555ca..26eb7dce5fe55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -189,14 +189,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&glob->lru_lock); } - if (vm->use_cpu_for_update) { + if (bo->tbo.type == ttm_bo_type_kernel && + vm->use_cpu_for_update) { r = amdgpu_bo_kmap(bo, NULL); if (r) return r; } spin_lock(&vm->status_lock); - list_move(&bo_base->vm_status, &vm->relocated); + if (bo->tbo.type != ttm_bo_type_kernel) + list_move(&bo_base->vm_status, &vm->moved); + else + list_move(&bo_base->vm_status, &vm->relocated); } spin_unlock(&vm->status_lock); @@ -1985,20 +1989,23 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, } /** - * amdgpu_vm_clear_moved - clear moved BOs in the PT + * amdgpu_vm_handle_moved - handle moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm + * @sync: sync object to add fences to * - * Make sure all moved BOs are cleared in the PT. + * Make sure all BOs which are moved are updated in the PTs. * Returns 0 for success. * - * PTs have to be reserved and mutex must be locked! + * PTs have to be reserved! 
*/ -int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync) +int amdgpu_vm_handle_moved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; + bool clear; int r = 0; spin_lock(&vm->status_lock); @@ -2007,7 +2014,10 @@ int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, true); + /* Per VM BOs never need to bo cleared in the page tables */ + clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv; + + r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; @@ -2059,6 +2069,37 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, return bo_va; } + +/** + * amdgpu_vm_bo_insert_mapping - insert a new mapping + * + * @adev: amdgpu_device pointer + * @bo_va: bo_va to store the address + * @mapping: the mapping to insert + * + * Insert a new mapping into all structures. + */ +static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + struct amdgpu_bo_va_mapping *mapping) +{ + struct amdgpu_vm *vm = bo_va->base.vm; + struct amdgpu_bo *bo = bo_va->base.bo; + + list_add(&mapping->list, &bo_va->invalids); + amdgpu_vm_it_insert(mapping, &vm->va); + + if (mapping->flags & AMDGPU_PTE_PRT) + amdgpu_vm_prt_get(adev); + + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + spin_lock(&vm->status_lock); + list_move(&bo_va->base.vm_status, &vm->moved); + spin_unlock(&vm->status_lock); + } + trace_amdgpu_vm_bo_map(bo_va, mapping); +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -2110,18 +2151,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (!mapping) return -ENOMEM; - INIT_LIST_HEAD(&mapping->list); mapping->start = saddr; mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->invalids); - amdgpu_vm_it_insert(mapping, &vm->va); - - if (flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); - trace_amdgpu_vm_bo_map(bo_va, mapping); + amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } @@ -2148,7 +2183,6 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo = bo_va->base.bo; - struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; int r; @@ -2182,12 +2216,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->invalids); - amdgpu_vm_it_insert(mapping, &vm->va); - - if (flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); - trace_amdgpu_vm_bo_map(bo_va, mapping); + amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } @@ -2402,7 +2431,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { spin_lock(&bo_base->vm->status_lock); - list_move(&bo_base->vm_status, &vm->evicted); + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&bo_base->vm_status, &vm->evicted); + else + list_move_tail(&bo_base->vm_status, + &vm->evicted); spin_unlock(&bo_base->vm->status_lock); continue; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index c3753afe98530..90b7741d024b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -249,8 +249,9 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, int 
amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); -int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync); +int amdgpu_vm_handle_moved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); From e1eb899b45781b9bb77e6d7772d6e67bb0bc1a18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 25 Aug 2017 09:14:43 +0200 Subject: [PATCH 054/232] drm/amdgpu: add IOCTL interface for per VM BOs v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the IOCTL interface so that applications can allocate per VM BOs. Still WIP since not all corner cases are tested yet, but this reduces average CS overhead for 10K BOs from 21ms down to 48us. v2: add some extra checks, remove the WIP tag v3: rename new flag to AMDGPU_GEM_CREATE_VM_ALWAYS_VALID Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 63 +++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 3 +- include/uapi/drm/amdgpu_drm.h | 2 + 5 files changed, 55 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 103635ab784c9..5809f55e0d9d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -455,9 +455,10 @@ struct amdgpu_sa_bo { */ void amdgpu_gem_force_release(struct amdgpu_device *adev); int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, - int alignment, u32 initial_domain, - u64 flags, bool kernel, - struct drm_gem_object **obj); + int alignment, u32 initial_domain, + u64 flags, bool kernel, + struct reservation_object *resv, + struct drm_gem_object **obj); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 9afa9c097e1f1..b6cb276f0a709 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CLEARED, - true, &gobj); + true, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e32a2b55b54f7..f1e61b3df6408 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) } int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, - int alignment, u32 initial_domain, - u64 flags, bool kernel, - struct drm_gem_object **obj) + int alignment, u32 initial_domain, + u64 flags, bool kernel, + struct reservation_object *resv, + struct drm_gem_object **obj) { - struct amdgpu_bo *robj; + struct amdgpu_bo *bo; int r; *obj = NULL; @@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, - flags, NULL, NULL, 0, &robj); + flags, NULL, 
resv, 0, &bo); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -71,7 +72,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, } return r; } - *obj = &robj->gem_base; + *obj = &bo->gem_base; return 0; } @@ -119,6 +120,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, if (mm && mm != current->mm) return -EPERM; + if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && + abo->tbo.resv != vm->root.base.bo->tbo.resv) + return -EPERM; + r = amdgpu_bo_reserve(abo, false); if (r) return r; @@ -142,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_list_entry vm_pd; - struct list_head list; + struct list_head list, duplicates; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; int r; INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; tv.shared = true; @@ -156,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); + r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); if (r) { dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); @@ -191,9 +197,12 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; union drm_amdgpu_gem_create *args = data; uint64_t flags = args->in.domain_flags; uint64_t size = args->in.bo_size; + struct reservation_object *resv = NULL; struct drm_gem_object *gobj; uint32_t handle; int r; @@ -202,7 +211,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_VRAM_CLEARED)) + AMDGPU_GEM_CREATE_VRAM_CLEARED | + AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)) return -EINVAL; /* reject invalid gem domains */ @@ -229,9 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, } size = roundup(size, PAGE_SIZE); + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { + r = amdgpu_bo_reserve(vm->root.base.bo, false); + if (r) + return r; + + resv = vm->root.base.bo->tbo.resv; + } + r = amdgpu_gem_object_create(adev, size, args->in.alignment, (u32)(0xffffffff & args->in.domains), - flags, false, &gobj); + flags, false, resv, &gobj); + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { + if (!r) { + struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); + + abo->parent = amdgpu_bo_ref(vm->root.base.bo); + } + amdgpu_bo_unreserve(vm->root.base.bo); + } if (r) return r; @@ -273,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, } /* create a gem object to contain this object in */ - r = amdgpu_gem_object_create(adev, args->size, 0, - AMDGPU_GEM_DOMAIN_CPU, 0, - 0, &gobj); + r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, + 0, 0, NULL, &gobj); if (r) return r; @@ -527,7 +552,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_bo_list_entry vm_pd; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; - struct list_head list; + struct list_head list, duplicates; uint64_t va_flags; int r = 0; @@ -563,6 +588,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); if ((args->operation != 
AMDGPU_VA_OP_CLEAR) && !(args->flags & AMDGPU_VM_PAGE_PRT)) { gobj = drm_gem_object_lookup(filp, args->handle); @@ -579,7 +605,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) goto error_unref; @@ -645,6 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { + struct amdgpu_device *adev = dev->dev_private; struct drm_amdgpu_gem_op *args = data; struct drm_gem_object *gobj; struct amdgpu_bo *robj; @@ -692,6 +719,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; + if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) + amdgpu_vm_bo_invalidate(adev, robj, true); + amdgpu_bo_unreserve(robj); break; default: @@ -721,8 +751,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - ttm_bo_type_device, - &gobj); + false, NULL, &gobj); if (r) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 5b3f92891f899..7e0826469b5e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -136,7 +136,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); - if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || + bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) return ERR_PTR(-EPERM); return drm_gem_prime_export(dev, gobj, flags); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 7b8fa11c2285b..e055776f2f4c6 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -87,6 +87,8 @@ extern "C" { #define AMDGPU_GEM_CREATE_SHADOW (1 << 4) /* Flag that allocating the BO should use linear VRAM */ #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) +/* Flag that BO is always valid in this VM */ +#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6) struct drm_amdgpu_gem_create_in { /** the requested memory size */ From fd8bf087dffc0bce047c5aea2afcb8f821e48db1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 29 Aug 2017 16:14:32 +0200 Subject: [PATCH 055/232] drm/amdgpu: bump version for support of local BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0f16986ec5bc4..792b11795a816 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -69,9 +69,10 @@ * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS. 
* - 3.18.0 - Export gpu always on cu bitmap * - 3.19.0 - Add support for UVD MJPEG decode + * - 3.20.0 - Add support for local BOs */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 19 +#define KMS_DRIVER_MINOR 20 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; From 35c32f20a7e28b82b61d44a164b756d933bca4a4 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 08:41:54 -0400 Subject: [PATCH 056/232] drm/amd/amdgpu: Tidy up register list formatting. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 96 ++++++++++++++++++--------- 1 file changed, 64 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7c06d1b99d998..8b31f8427d9aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -66,38 +66,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), 
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)}, - {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE), - SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)} + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)}, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) }, + { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE), + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) } }; static const u32 golden_settings_gc_9_0[] = From 91629eff745e4c27d6501f1949e502868d9d5706 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 08:50:23 -0400 Subject: [PATCH 057/232] drm/amd/amdgpu: Tidy up gfx_v9_0_ngg_en() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher 
--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8b31f8427d9aa..44885ffaadb74 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1152,30 +1152,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; int r; - u32 data; - u32 size; - u32 base; + u32 data, base; if (!amdgpu_ngg) return 0; /* Program buffer size */ - data = 0; - size = adev->gfx.ngg.buf[NGG_PRIM].size / 256; - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size); - - size = adev->gfx.ngg.buf[NGG_POS].size / 256; - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size); - + data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, + adev->gfx.ngg.buf[NGG_PRIM].size >> 8); + data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, + adev->gfx.ngg.buf[NGG_POS].size >> 8); WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); - data = 0; - size = adev->gfx.ngg.buf[NGG_CNTL].size / 256; - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size); - - size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024; - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size); - + data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, + adev->gfx.ngg.buf[NGG_CNTL].size >> 8); + data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, + adev->gfx.ngg.buf[NGG_PARAM].size >> 10); WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); /* Program buffer base address */ From 0e5293d07b92dfc1ada2d4a6c9057c1ee8179399 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 08:59:00 -0400 Subject: [PATCH 058/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_save_restore_machine() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 44885ffaadb74..0d6a1b4075e98 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1764,11 +1764,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev) { - u32 tmp = 0; - - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); - tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); + WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); } static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev, From e24c7f06db418c34c179233724656ec0efae6bc8 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:01:11 -0400 Subject: [PATCH 059/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_sck_slow_down_on_power_up() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0d6a1b4075e98..5a301c865bf12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1842,16 +1842,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev uint32_t default_data = 0; default_data = data = 
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - - if (enable == true) { - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - if (default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } else { - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - if(default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } + data = REG_SET_FIELD(data, RLC_PG_CNTL, + SMU_CLK_SLOWDOWN_ON_PU_ENABLE, + enable ? 1 : 0); + if (default_data != data) + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, From b926fe8efc54062f984b70e79bd71dddba176816 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:02:33 -0400 Subject: [PATCH 060/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_sck_slow_down_on_power_down() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5a301c865bf12..1f95ca8e476bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1856,16 +1856,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad uint32_t default_data = 0; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - - if (enable == true) { - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - if(default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } else { - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - if(default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } + data = REG_SET_FIELD(data, RLC_PG_CNTL, + SMU_CLK_SLOWDOWN_ON_PD_ENABLE, + enable ? 1 : 0); + if(default_data != data) + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev, From 54cfe0fc54a16de265aac79fb8c7c7cd3131c964 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:12:47 -0400 Subject: [PATCH 061/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_cp_power_gating() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1f95ca8e476bd..61b3362d3c20f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1870,16 +1870,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev, uint32_t default_data = 0; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - - if (enable == true) { - data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; - if(default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } else { - data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; - if(default_data != data) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); - } + data = REG_SET_FIELD(data, RLC_PG_CNTL, + CP_PG_DISABLE, + enable ? 
0 : 1); + if(default_data != data) + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev, From f55ee212ee263de6f9a56530095bfafbecc5863d Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:14:41 -0400 Subject: [PATCH 062/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_gfx_cg_power_gating() Make it consistent in style with the other CG/PG enable functions... Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 61b3362d3c20f..b0805b1e7b299 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1883,10 +1883,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev, uint32_t data, default_data; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - if (enable == true) - data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + data = REG_SET_FIELD(data, RLC_PG_CNTL, + GFX_POWER_GATING_ENABLE, + enable ? 1 : 0); if(default_data != data) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } From 513f81332a498ce989559435a74491897dda8af2 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:27:22 -0400 Subject: [PATCH 063/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_gfx_pipeline_powergating() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b0805b1e7b299..1a928f6dc8011 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1896,10 +1896,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev, uint32_t data, default_data; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - if (enable == true) - data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + data = REG_SET_FIELD(data, RLC_PG_CNTL, + GFX_PIPELINE_PG_ENABLE, + enable ? 1 : 0); if(default_data != data) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); From 7915c8fd7ec70f65c66c268d8d19f563b5a0162d Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:31:01 -0400 Subject: [PATCH 064/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_gfx_static_mg_power_gating() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1a928f6dc8011..22346bbe9a4c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1913,10 +1913,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade uint32_t data, default_data; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - if (enable == true) - data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; + data = REG_SET_FIELD(data, RLC_PG_CNTL, + STATIC_PER_CU_PG_ENABLE, + enable ? 
1 : 0); if(default_data != data) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } From e567fa69f2045b5484a232a51ebbfe92f7c65386 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:31:23 -0400 Subject: [PATCH 065/232] drm/amd/amdgpu: Tidy up gfx_v9_0_enable_gfx_dynamic_mg_power_gating() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 22346bbe9a4c5..16d1a429ec9fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1926,10 +1926,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad uint32_t data, default_data; default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); - if (enable == true) - data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; + data = REG_SET_FIELD(data, RLC_PG_CNTL, + DYN_PER_CU_PG_ENABLE, + enable ? 1 : 0); if(default_data != data) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } From b08796cee109b35996357286ca878611e9dae4a4 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:40:38 -0400 Subject: [PATCH 066/232] drm/amd/amdgpu: Tidy up gfx_v9_0_rlc_stop() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 16d1a429ec9fd..e216aa68f51fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1968,13 +1968,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); - WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp); - + WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_0_enable_gui_idle_interrupt(adev, false); - gfx_v9_0_wait_for_rlc_serdes(adev); } From 78888cff5cbaf5ec712b20a216f500b7d4493e77 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:43:47 -0400 Subject: [PATCH 067/232] drm/amd/amdgpu: Fix indentation in gfx_v9_0_mqd_init() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e216aa68f51fe..2ba02739a2280 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2489,10 +2489,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) DOORBELL_SOURCE, 0); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0); - } - else + } else { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0); + } mqd->cp_hqd_pq_doorbell_control = tmp; From 2b9bdfa70faf8c00969f91d3c4548a0df6071b90 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 31 Aug 2017 09:48:11 -0400 Subject: [PATCH 068/232] drm/amd/amdgpu: Simplify gfx_v9_0_wait_for_idle() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2ba02739a2280..a21182debb3d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2937,15 +2937,10 @@ static bool gfx_v9_0_is_idle(void *handle) static int gfx_v9_0_wait_for_idle(void *handle) { unsigned i; - u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) & - GRBM_STATUS__GUI_ACTIVE_MASK; - - if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + if (gfx_v9_0_is_idle(handle)) return 0; udelay(1); } From 6849d47cabc36e8f2697043f8c81e7719876dfd3 Mon Sep 17 00:00:00 2001 From: Roger He Date: Wed, 30 Aug 2017 13:01:19 +0800 Subject: [PATCH 069/232] drm/amdgpu: handle all fragment sizes v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can improve performance for some cases. v2 (chk): handle all sizes, simplify the patch quite a bit v3 (chk): adjust dw estimation as well v4 (chk): use single loop, make end mask 64bit Signed-off-by: Roger He Signed-off-by: Christian König Tested-by: Roger He Reviewed-by: Felix Kuehling Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 55 ++++++++++++-------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 26eb7dce5fe55..b83e0fa1f269f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1420,8 +1420,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { - int r; - /** * The MC L1 TLB supports variable sized pages, based on a fragment * field in the PTE. When this field is set to a non-zero value, page @@ -1440,39 +1438,38 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. 
*/ - unsigned pages_per_frag = params->adev->vm_manager.fragment_size; - uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); - uint64_t frag_align = 1 << pages_per_frag; - - uint64_t frag_start = ALIGN(start, frag_align); - uint64_t frag_end = end & ~(frag_align - 1); + unsigned max_frag = params->adev->vm_manager.fragment_size; + int r; /* system pages are non continuously */ - if (params->src || !(flags & AMDGPU_PTE_VALID) || - (frag_start >= frag_end)) + if (params->src || !(flags & AMDGPU_PTE_VALID)) return amdgpu_vm_update_ptes(params, start, end, dst, flags); - /* handle the 4K area at the beginning */ - if (start != frag_start) { - r = amdgpu_vm_update_ptes(params, start, frag_start, - dst, flags); + while (start != end) { + uint64_t frag_flags, frag_end; + unsigned frag; + + /* This intentionally wraps around if no bit is set */ + frag = min((unsigned)ffs(start) - 1, + (unsigned)fls64(end - start) - 1); + if (frag >= max_frag) { + frag_flags = AMDGPU_PTE_FRAG(max_frag); + frag_end = end & ~((1ULL << max_frag) - 1); + } else { + frag_flags = AMDGPU_PTE_FRAG(frag); + frag_end = start + (1 << frag); + } + + r = amdgpu_vm_update_ptes(params, start, frag_end, dst, + flags | frag_flags); if (r) return r; - dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; - } - - /* handle the area in the middle */ - r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst, - flags | frag_flags); - if (r) - return r; - /* handle the 4K area at the end */ - if (frag_end != end) { - dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; - r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags); + dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE; + start = frag_end; } - return r; + + return 0; } /** @@ -1562,8 +1559,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* set page commands needed */ ndw += ncmds * 10; - /* two extra commands for begin/end of fragment */ - ndw += 2 * 10; + /* extra commands for begin/end fragments */ + ndw += 2 * 10 * adev->vm_manager.fragment_size; params.func = amdgpu_vm_do_set_ptes; } From f5830465967799de0334340d1888f7d2c0bc17f5 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 1 Sep 2017 15:07:41 +0800 Subject: [PATCH 070/232] drm/amdgpu: declare the new firmware files needed by polaris asics Signed-off-by: Evan Quan Reviewed-by: Flora Cui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6666fcd8b08fb..666a1545e949a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -125,24 +125,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin"); MODULE_FIRMWARE("amdgpu/fiji_rlc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin"); MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin"); MODULE_FIRMWARE("amdgpu/polaris11_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin"); MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_me.bin"); 
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin"); static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = From 481c2e94897eb74abcfb4a3cdb87f5f89499b93f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 1 Sep 2017 14:46:19 +0200 Subject: [PATCH 071/232] drm/amdgpu: fix moved list handling in the VM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only move BOs to the moved/relocated list when they aren't already on a list. This prevents accidential removal from the evicted list. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b83e0fa1f269f..c9223a5184dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1178,7 +1178,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm, entry->addr = ~0ULL; spin_lock(&vm->status_lock); - list_move(&entry->base.vm_status, &vm->relocated); + if (list_empty(&entry->base.vm_status)) + list_add(&entry->base.vm_status, &vm->relocated); spin_unlock(&vm->status_lock); amdgpu_vm_invalidate_level(vm, entry); } @@ -2091,7 +2092,8 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { spin_lock(&vm->status_lock); - list_move(&bo_va->base.vm_status, &vm->moved); + if (list_empty(&bo_va->base.vm_status)) + list_add(&bo_va->base.vm_status, &vm->moved); spin_unlock(&vm->status_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); @@ -2446,7 +2448,8 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, } spin_lock(&bo_base->vm->status_lock); - list_move(&bo_base->vm_status, &bo_base->vm->moved); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, &vm->moved); spin_unlock(&bo_base->vm->status_lock); } } From 70a9c6b9010848e101ff67a886f2e75209b2f0d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 1 Sep 2017 09:22:56 +0200 Subject: [PATCH 072/232] drm/amdgpu: fix placement flags in amdgpu_ttm_bind MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we lose the NO_EVICT flag and can try to evict pinned BOs. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6ea96e1fb273a..841a5699bef05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -819,7 +819,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); if (unlikely(r)) From f6886c472654e12d9cab4c4462a1c38c732468b8 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 09:13:04 -0400 Subject: [PATCH 073/232] drm/amd/amdgpu: Support full range of GFX ring names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now there's only one but the rest of the code is being setup to support more so might as well fix this up too. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index a21182debb3d9..2ab049c45b1d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1330,7 +1330,10 @@ static int gfx_v9_0_sw_init(void *handle) for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; - sprintf(ring->name, "gfx"); + if (!i) + sprintf(ring->name, "gfx"); + else + sprintf(ring->name, "gfx_%d", i); ring->use_doorbell = true; ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1; r = amdgpu_ring_init(adev, ring, 1024, From ae6d1416fa1624a6a2e4f2d392b3c0bdcc570712 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 09:27:31 -0400 Subject: [PATCH 074/232] drm/amd/amdgpu: Simplify gmc_v9_0_vm_fault_interrupt_state() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 51 +++++++++------------------ 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d04d0b1232120..1cb7aa2af6837 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -77,7 +77,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { struct amdgpu_vmhub *hub; - u32 tmp, reg, bits, i; + u32 tmp, reg, bits, i, j; bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | @@ -89,43 +89,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB]; - for (i = 0; i< 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp &= ~bits; - WREG32(reg, tmp); - } - - /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB]; - for (i = 0; i 
< 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp &= ~bits; - WREG32(reg, tmp); + for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) { + hub = &adev->vmhub[j]; + for (i = 0; i < 16; i++) { + reg = hub->vm_context0_cntl + i; + tmp = RREG32(reg); + tmp &= ~bits; + WREG32(reg, tmp); + } } break; case AMDGPU_IRQ_STATE_ENABLE: - /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB]; - for (i = 0; i< 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp |= bits; - WREG32(reg, tmp); + for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) { + hub = &adev->vmhub[j]; + for (i = 0; i < 16; i++) { + reg = hub->vm_context0_cntl + i; + tmp = RREG32(reg); + tmp |= bits; + WREG32(reg, tmp); + } } - - /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB]; - for (i = 0; i < 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp |= bits; - WREG32(reg, tmp); - } - break; default: break; } From 846347c9f527bc6a771dca568c866704c290e103 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 09:52:21 -0400 Subject: [PATCH 075/232] drm/amd/amdgpu: Tidy up gmc_v9_0_gart_enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 1cb7aa2af6837..d7cfee8072877 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -719,14 +719,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); - tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; - WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); - if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; else @@ -734,7 +731,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) gfxhub_v1_0_set_fault_enable_default(adev, value); mmhub_v1_0_set_fault_enable_default(adev, value); - gmc_v9_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", From 4d9c333a464aef3341357035cb75617ebb052c65 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 09:53:44 -0400 Subject: [PATCH 076/232] drm/amd/amdgpu: Tidy up gmc_v9_0_hw_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d7cfee8072877..0766dad1fafee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -749,17 +749,11 @@ static int gmc_v9_0_hw_init(void *handle) gmc_v9_0_init_golden_registers(adev); if (adev->mode_info.num_crtc) { - u32 tmp; - /* Lockout access through VGA aperture*/ - tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp); + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); /* 
disable VGA render */ - tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp); + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); } r = gmc_v9_0_gart_enable(adev); From f053cd478ecfe9cbc9988912fe63a6d723838b2e Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 09:55:04 -0400 Subject: [PATCH 077/232] drm/amd/amdgpu: Cleanup gmc_v9_0_suspend() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even though fini returns 0 always it could theoretically fail in the future. Might as well return it instead of 0. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0766dad1fafee..7ca9cbec3004f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -795,9 +795,7 @@ static int gmc_v9_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gmc_v9_0_hw_fini(adev); - - return 0; + return gmc_v9_0_hw_fini(adev); } static int gmc_v9_0_resume(void *handle) From 60233daca067cbef01fef6fdf457c1c11defcc90 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 1 Sep 2017 12:52:38 -0400 Subject: [PATCH 078/232] drm/ttm: Fix trace include path (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Thierry Reding Reviewed-by: Christian König Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher (v2): Drop Makefile change too. --- drivers/gpu/drm/ttm/Makefile | 1 - drivers/gpu/drm/ttm/ttm_trace.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index a44fdfbe6351a..ab2bef1219e55 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -1,7 +1,6 @@ # # Makefile for the drm device driver. This driver provides support for the -ccflags-y := -I$(src)/. ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ diff --git a/drivers/gpu/drm/ttm/ttm_trace.h b/drivers/gpu/drm/ttm/ttm_trace.h index 23279b9b8e645..715ce68b7b333 100644 --- a/drivers/gpu/drm/ttm/ttm_trace.h +++ b/drivers/gpu/drm/ttm/ttm_trace.h @@ -82,6 +82,6 @@ TRACE_EVENT(ttm_dma_unmap, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/ttm/ #include From 1430f73beb7fd4655d3fbd2e373615eca016f9c3 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 1 Sep 2017 12:54:01 -0400 Subject: [PATCH 079/232] drm/amdgpu: Use correct path to trace include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The header comment in include/trace/define_trace.h specifies that the TRACE_INCLUDE_PATH needs to be relative to the define_trace.h header rather than the trace file including it. Most instances get that wrong and work around it by adding the $(src) directory to the include path. While this works, it is preferable to refer to the correct path to the trace file in the first place and avoid any workaround. 
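In practice the tail of a trace header then looks like this (minimal sketch;
the TRACE_INCLUDE_FILE line is the usual tracepoint boilerplate and is only
assumed here, the amdgpu hunk below shows the path change itself):

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE amdgpu_trace
	#include <trace/define_trace.h>

With the path expressed relative to include/trace/define_trace.h, the
CFLAGS_*_trace_points.o := -I$(src) workaround in the Makefile can be
dropped as well.
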
Reviewed-by: Christian König Signed-off-by: Thierry Reding Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 658bac0cdc5e9..25a95c95df14d 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -133,5 +133,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile amdgpu-y += $(AMD_POWERPLAY_FILES) obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o - -CFLAGS_amdgpu_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index b1f97417241d0..213988f336edd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -417,5 +417,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu #include From 56d11d580974958bd3c7ae4a42368ae22cb50354 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 1 Sep 2017 16:49:53 +0200 Subject: [PATCH 080/232] drm/radeon: Use correct path to trace include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The header comment in include/trace/define_trace.h specifies that the TRACE_INCLUDE_PATH needs to be relative to the define_trace.h header rather than the trace file including it. Most instances get that wrong and work around it by adding the $(src) directory to the include path. While this works, it is preferable to refer to the correct path to the trace file in the first place and avoid any workaround. Reviewed-by: Christian König Signed-off-by: Thierry Reding Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/Makefile | 2 -- drivers/gpu/drm/radeon/radeon_trace.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 4acbb944bcd2b..be16c63902167 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -109,5 +109,3 @@ radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o obj-$(CONFIG_DRM_RADEON)+= radeon.o - -CFLAGS_radeon_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index fdce4062901fe..815eaa8c394bb 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -204,5 +204,5 @@ DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/radeon #include From cd00a424d26f9b954f5a084b88800e859fc0c52f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 29 Aug 2017 10:55:05 -0400 Subject: [PATCH 081/232] drm/amd/powerplay: fix sclk setting for profile mode for CZ/ST Need to select dpm0 to avoid clock fluctuations. 
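"dpm0" here is the lowest sclk entry in the DPM table. Instead of computing
an intermediate clock (roughly 70% of the top sclk) and forcing it, the
profile standard level is now routed through the same path as LOW and
PROFILE_MIN_SCLK, which pins the SMU to that lowest entry. A rough sketch of
the pinning idea, reusing the helpers visible in the hunk below (lowest_sclk
is illustrative, not a real variable in this file):

	/* Force soft min and soft max to the same (lowest) sclk level so
	 * the SMU has no room to bounce between DPM levels.
	 */
	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetSclkSoftMin,
			cz_get_sclk_level(hwmgr, lowest_sclk,
					  PPSMC_MSG_SetSclkSoftMin));
	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetSclkSoftMax,
			cz_get_sclk_level(hwmgr, lowest_sclk,
					  PPSMC_MSG_SetSclkSoftMax));
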
Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 47 +------------------ 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 9f2c0378c0592..b9c61ece67840 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1310,48 +1310,9 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) return 0; } -static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk) -{ - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(hwmgr, - sclk, - PPSMC_MSG_SetSclkSoftMin)); - - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(hwmgr, - sclk, - PPSMC_MSG_SetSclkSoftMax)); - return 0; -} - -static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk) -{ - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - int32_t tmp_sclk; - int32_t count; - - tmp_sclk = table->entries[table->count-1].clk * 70 / 100; - - for (count = table->count-1; count >= 0; count--) { - if (tmp_sclk >= table->entries[count].clk) { - tmp_sclk = table->entries[count].clk; - *sclk = tmp_sclk; - break; - } - } - if (count < 0) - *sclk = table->entries[0].clk; - - return 0; -} - static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { - uint32_t sclk = 0; int ret = 0; uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | @@ -1389,6 +1350,7 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, break; case AMD_DPM_FORCED_LEVEL_LOW: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: ret = cz_phm_force_dpm_lowest(hwmgr); if (ret) return ret; @@ -1400,13 +1362,6 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return ret; hwmgr->dpm_level = level; break; - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = cz_get_profiling_clk(hwmgr, &sclk); - if (ret) - return ret; - hwmgr->dpm_level = level; - cz_phm_force_dpm_sclk(hwmgr, sclk); - break; case AMD_DPM_FORCED_LEVEL_MANUAL: hwmgr->dpm_level = level; break; From df1e63942063a0638a5813e5904988d834db2665 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 1 Sep 2017 13:46:20 +0800 Subject: [PATCH 082/232] drm/amd/powerplay: delete eventmgr layer in poweprlay Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 2 +- drivers/gpu/drm/amd/powerplay/Makefile | 5 +- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 112 +------- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 3 +- .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 18 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 214 +++++++++------ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 246 ++++++++++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h | 40 +++ .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 96 +------ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 5 + .../gpu/drm/amd/powerplay/inc/pp_instance.h | 2 - 13 files changed, 475 insertions(+), 282 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 8c96a4caa715e..3eba4137508be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -383,8 +383,8 @@ struct amdgpu_dpm_funcs { #define amdgpu_dpm_set_mclk_od(adev, value) \ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) -#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ - (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) +#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \ + ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) #define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7df503aedb692..10c5d78081edf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -119,7 +119,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, } if (adev->pp_enabled) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); } else { mutex_lock(&adev->pm.mutex); adev->pm.dpm.user_state = state; @@ -330,7 +330,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, if (state != POWER_STATE_TYPE_INTERNAL_BOOT && state != POWER_STATE_TYPE_DEFAULT) { amdgpu_dpm_dispatch_task(adev, - AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); adev->pp_force_state_enabled = true; } } @@ -559,7 +559,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, if (adev->pp_enabled) { amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); - amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); } else if (adev->pm.funcs->set_sclk_od) { adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; @@ -605,7 +605,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, if (adev->pp_enabled) { amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); - amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); } else if (adev->pm.funcs->set_mclk_od) { adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; @@ -1496,7 +1496,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) } if (adev->pp_enabled) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL); } else { mutex_lock(&adev->pm.mutex); adev->pm.dpm.new_active_crtcs = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index b7e1c026c0c86..b0c4db8098edc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -128,7 +128,7 @@ static int amdgpu_pp_late_init(void *handle) if (adev->pp_enabled && adev->pm.dpm_enabled) { amdgpu_pm_sysfs_init(adev); - amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL); } return ret; diff --git 
a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile index 4e132b936e3d2..68b417ac94ddb 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -4,12 +4,11 @@ subdir-ccflags-y += \ -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/powerplay/smumgr\ - -I$(FULL_AMD_PATH)/powerplay/hwmgr \ - -I$(FULL_AMD_PATH)/powerplay/eventmgr + -I$(FULL_AMD_PATH)/powerplay/hwmgr AMD_PP_PATH = ../powerplay -PP_LIBS = smumgr hwmgr eventmgr +PP_LIBS = smumgr hwmgr AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS))) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index f73e80c4bf337..94bed3c08161b 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -29,7 +29,6 @@ #include "amd_powerplay.h" #include "pp_instance.h" #include "power_state.h" -#include "eventmanager.h" static inline int pp_check(struct pp_instance *handle) @@ -43,8 +42,7 @@ static inline int pp_check(struct pp_instance *handle) if (handle->pm_en == 0) return PP_DPM_DISABLED; - if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL - || handle->eventmgr == NULL) + if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL) return PP_DPM_DISABLED; return 0; @@ -69,14 +67,6 @@ static int pp_early_init(void *handle) return PP_DPM_DISABLED; } - ret = eventmgr_early_init(pp_handle); - if (ret) { - kfree(pp_handle->hwmgr); - pp_handle->hwmgr = NULL; - pp_handle->pm_en = 0; - return PP_DPM_DISABLED; - } - return 0; } @@ -122,7 +112,6 @@ static int pp_sw_fini(void *handle) static int pp_hw_init(void *handle) { struct pp_smumgr *smumgr; - struct pp_eventmgr *eventmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -146,38 +135,23 @@ static int pp_hw_init(void *handle) ret = hwmgr_hw_init(pp_handle); if (ret) goto err; - - eventmgr = pp_handle->eventmgr; - if (eventmgr->pp_eventmgr_init == NULL || - eventmgr->pp_eventmgr_init(eventmgr)) - goto err; - return 0; err: pp_handle->pm_en = 0; - kfree(pp_handle->eventmgr); kfree(pp_handle->hwmgr); pp_handle->hwmgr = NULL; - pp_handle->eventmgr = NULL; return PP_DPM_DISABLED; } static int pp_hw_fini(void *handle) { - struct pp_eventmgr *eventmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; ret = pp_check(pp_handle); - - if (ret == 0) { - eventmgr = pp_handle->eventmgr; - - if (eventmgr->pp_eventmgr_fini != NULL) - eventmgr->pp_eventmgr_fini(eventmgr); - + if (ret == 0) hwmgr_hw_fini(pp_handle); - } + return 0; } @@ -244,8 +218,6 @@ static int pp_set_powergating_state(void *handle, static int pp_suspend(void *handle) { - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; @@ -256,16 +228,11 @@ static int pp_suspend(void *handle) else if (ret != 0) return ret; - eventmgr = pp_handle->eventmgr; - pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data); - - return 0; + return hwmgr_hw_suspend(pp_handle); } static int pp_resume(void *handle) { - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; struct pp_smumgr *smumgr; int ret, ret1; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -290,11 +257,7 @@ static int pp_resume(void *handle) if (ret1 == PP_DPM_DISABLED) return 0; - eventmgr = pp_handle->eventmgr; - - pem_handle_event(eventmgr, 
AMD_PP_EVENT_RESUME, &event_data); - - return 0; + return hwmgr_hw_resume(pp_handle); } const struct amd_ip_funcs pp_ip_funcs = { @@ -344,6 +307,7 @@ static int pp_dpm_force_performance_level(void *handle, } mutex_lock(&pp_handle->pp_lock); + hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); mutex_unlock(&pp_handle->pp_lock); return 0; @@ -461,60 +425,21 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate) return ret; } -static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) -{ - switch (state) { - case POWER_STATE_TYPE_BATTERY: - return PP_StateUILabel_Battery; - case POWER_STATE_TYPE_BALANCED: - return PP_StateUILabel_Balanced; - case POWER_STATE_TYPE_PERFORMANCE: - return PP_StateUILabel_Performance; - default: - return PP_StateUILabel_None; - } -} - -static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, +static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, void *input, void *output) { int ret = 0; - struct pem_event_data data = { {0} }; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret != 0) return ret; - mutex_lock(&pp_handle->pp_lock); - switch (event_id) { - case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - case AMD_PP_EVENT_ENABLE_USER_STATE: - { - enum amd_pm_state_type ps; - - if (input == NULL) { - ret = -EINVAL; - break; - } - ps = *(unsigned long *)input; - data.requested_ui_label = power_state_convert(ps); - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - } - case AMD_PP_EVENT_COMPLETE_INIT: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - case AMD_PP_EVENT_READJUST_POWER_STATE: - ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); - break; - default: - break; - } + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr_handle_task(pp_handle, task_id, input, output); mutex_unlock(&pp_handle->pp_lock); + return ret; } @@ -1190,10 +1115,8 @@ int amd_powerplay_destroy(void *handle) struct pp_instance *instance = (struct pp_instance *)handle; if (instance->pm_en) { - kfree(instance->eventmgr); kfree(instance->hwmgr); instance->hwmgr = NULL; - instance->eventmgr = NULL; } kfree(instance->smu_mgr); @@ -1206,8 +1129,6 @@ int amd_powerplay_destroy(void *handle) int amd_powerplay_reset(void *handle) { struct pp_instance *instance = (struct pp_instance *)handle; - struct pp_eventmgr *eventmgr; - struct pem_event_data event_data = { {0} }; int ret; if (cgs_is_virtualization_enabled(instance->smu_mgr->device)) @@ -1217,7 +1138,7 @@ int amd_powerplay_reset(void *handle) if (ret != 0) return ret; - ret = pp_hw_fini(handle); + ret = pp_hw_fini(instance); if (ret) return ret; @@ -1225,16 +1146,7 @@ int amd_powerplay_reset(void *handle) if (ret) return PP_DPM_DISABLED; - eventmgr = instance->eventmgr; - - if (eventmgr->pp_eventmgr_init == NULL) - return PP_DPM_DISABLED; - - ret = eventmgr->pp_eventmgr_init(eventmgr); - if (ret) - return ret; - - return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data); + return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL); } /* export this function to DAL */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f0277c16c2bff..79119d6cd07f2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -9,7 
+9,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ smu7_clockpowergating.o \ vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ - vega10_thermal.o pp_overdriver.o rv_hwmgr.o + vega10_thermal.o rv_hwmgr.o pp_psm.o\ + pp_overdriver.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 967f50f54384d..ce378bd216613 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -26,6 +26,10 @@ #include "hardwaremanager.h" #include "power_state.h" + +#define TEMP_RANGE_MIN (90 * 1000) +#define TEMP_RANGE_MAX (120 * 1000) + #define PHM_FUNC_CHECK(hw) \ do { \ if ((hw) == NULL || (hw)->hwmgr_func == NULL) \ @@ -292,7 +296,19 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info) */ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range) { - return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL); + struct PP_TemperatureRange range; + + if (temperature_range == NULL) { + range.max = TEMP_RANGE_MAX; + range.min = TEMP_RANGE_MIN; + } else { + range.max = temperature_range->max; + range.min = temperature_range->min; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), &range, NULL); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 9547f265a8bb8..c6157bcdf7d69 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -35,9 +35,9 @@ #include "ppsmc.h" #include "pp_acpi.h" #include "amd_acpi.h" +#include "pp_psm.h" extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); - static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); @@ -131,80 +131,6 @@ int hwmgr_early_init(struct pp_instance *handle) return 0; } -static int hw_init_power_state_table(struct pp_hwmgr *hwmgr) -{ - int result; - unsigned int i; - unsigned int table_entries; - struct pp_power_state *state; - int size; - - if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) - return -EINVAL; - - if (hwmgr->hwmgr_func->get_power_state_size == NULL) - return -EINVAL; - - hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); - - hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + - sizeof(struct pp_power_state); - - hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); - if (hwmgr->ps == NULL) - return -ENOMEM; - - hwmgr->request_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->request_ps == NULL) { - kfree(hwmgr->ps); - hwmgr->ps = NULL; - return -ENOMEM; - } - - hwmgr->current_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->current_ps == NULL) { - kfree(hwmgr->request_ps); - kfree(hwmgr->ps); - hwmgr->request_ps = NULL; - hwmgr->ps = NULL; - return -ENOMEM; - } - - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); - - if (state->classification.flags & PP_StateClassificationFlag_Boot) { - hwmgr->boot_ps = state; - memcpy(hwmgr->current_ps, state, 
size); - memcpy(hwmgr->request_ps, state, size); - } - - state->id = i + 1; /* assigned unique num for every power state id */ - - if (state->classification.flags & PP_StateClassificationFlag_Uvd) - hwmgr->uvd_ps = state; - state = (struct pp_power_state *)((unsigned long)state + size); - } - - return 0; -} - -static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr) -{ - if (hwmgr == NULL) - return -EINVAL; - - kfree(hwmgr->current_ps); - kfree(hwmgr->request_ps); - kfree(hwmgr->ps); - hwmgr->request_ps = NULL; - hwmgr->ps = NULL; - hwmgr->current_ps = NULL; - return 0; -} - int hwmgr_hw_init(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; @@ -228,9 +154,22 @@ int hwmgr_hw_init(struct pp_instance *handle) if (ret) goto err1; - ret = hw_init_power_state_table(hwmgr); + ret = psm_init_power_state_table(hwmgr); + if (ret) + goto err2; + + ret = phm_setup_asic(hwmgr); if (ret) goto err2; + + ret = phm_enable_dynamic_state_management(hwmgr); + if (ret) + goto err2; + ret = phm_start_thermal_controller(hwmgr, NULL); + ret |= psm_set_performance_states(hwmgr); + if (ret) + goto err2; + return 0; err2: if (hwmgr->hwmgr_func->backend_fini) @@ -247,19 +186,138 @@ int hwmgr_hw_fini(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; - if (handle == NULL) + if (handle == NULL || handle->hwmgr == NULL) return -EINVAL; hwmgr = handle->hwmgr; + phm_stop_thermal_controller(hwmgr); + psm_set_boot_states(hwmgr); + phm_display_configuration_changed(hwmgr); + psm_adjust_power_state_dynamic(hwmgr, false, NULL); + phm_disable_dynamic_state_management(hwmgr); + phm_disable_clock_power_gatings(hwmgr); + if (hwmgr->hwmgr_func->backend_fini) hwmgr->hwmgr_func->backend_fini(hwmgr); if (hwmgr->pptable_func->pptable_fini) hwmgr->pptable_func->pptable_fini(hwmgr); - return hw_fini_power_state_table(hwmgr); + return psm_fini_power_state_table(hwmgr); } +int hwmgr_hw_suspend(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + hwmgr = handle->hwmgr; + phm_disable_smc_firmware_ctf(hwmgr); + ret = psm_set_boot_states(hwmgr); + if (ret) + return ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + if (ret) + return ret; + ret = phm_power_down_asic(hwmgr); + + return ret; +} + +int hwmgr_hw_resume(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + ret = phm_setup_asic(hwmgr); + if (ret) + return ret; + + ret = phm_enable_dynamic_state_management(hwmgr); + if (ret) + return ret; + ret = phm_start_thermal_controller(hwmgr, NULL); + if (ret) + return ret; + + ret |= psm_set_performance_states(hwmgr); + if (ret) + return ret; + + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + + return ret; +} + +static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) +{ + switch (state) { + case POWER_STATE_TYPE_BATTERY: + return PP_StateUILabel_Battery; + case POWER_STATE_TYPE_BALANCED: + return PP_StateUILabel_Balanced; + case POWER_STATE_TYPE_PERFORMANCE: + return PP_StateUILabel_Performance; + default: + return PP_StateUILabel_None; + } +} + +int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, + void *input, void *output) +{ + int ret = 0; + struct pp_hwmgr *hwmgr; + + if (handle == NULL || handle->hwmgr == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + + switch (task_id) { + case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + ret = 
phm_set_cpu_power_state(hwmgr); + if (ret) + return ret; + ret = psm_set_performance_states(hwmgr); + if (ret) + return ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + break; + case AMD_PP_TASK_ENABLE_USER_STATE: + { + enum amd_pm_state_type ps; + enum PP_StateUILabel requested_ui_label; + struct pp_power_state *requested_ps; + + if (input == NULL) { + ret = -EINVAL; + break; + } + ps = *(unsigned long *)input; + + requested_ui_label = power_state_convert(ps); + ret = psm_set_user_performance_state(hwmgr, requested_ui_label, requested_ps); + if (ret) + return ret; + ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps); + break; + } + case AMD_PP_TASK_COMPLETE_INIT: + case AMD_PP_TASK_READJUST_POWER_STATE: + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + break; + default: + break; + } + return ret; +} /** * Returns once the part of the register indicated by the mask has * reached the given value. diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c new file mode 100644 index 0000000000000..7656324957a8d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -0,0 +1,246 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include "pp_psm.h" + +int psm_init_power_state_table(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned int i; + unsigned int table_entries; + struct pp_power_state *state; + int size; + + if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) + return -EINVAL; + + if (hwmgr->hwmgr_func->get_power_state_size == NULL) + return -EINVAL; + + hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); + + hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + + sizeof(struct pp_power_state); + + hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); + if (hwmgr->ps == NULL) + return -ENOMEM; + + hwmgr->request_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->request_ps == NULL) { + kfree(hwmgr->ps); + hwmgr->ps = NULL; + return -ENOMEM; + } + + hwmgr->current_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->current_ps == NULL) { + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; + return -ENOMEM; + } + + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); + + if (state->classification.flags & PP_StateClassificationFlag_Boot) { + hwmgr->boot_ps = state; + memcpy(hwmgr->current_ps, state, size); + memcpy(hwmgr->request_ps, state, size); + } + + state->id = i + 1; /* assigned unique num for every power state id */ + + if (state->classification.flags & PP_StateClassificationFlag_Uvd) + hwmgr->uvd_ps = state; + state = (struct pp_power_state *)((unsigned long)state + size); + } + + return 0; +} + +int psm_fini_power_state_table(struct pp_hwmgr *hwmgr) +{ + if (hwmgr == NULL) + return -EINVAL; + + kfree(hwmgr->current_ps); + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; + hwmgr->current_ps = NULL; + return 0; +} + +static int psm_get_ui_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel ui_label, + unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.ui_label & ui_label) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr, + enum PP_StateClassificationFlag flag, + unsigned long *state_id) +{ + struct pp_power_state *state; + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->classification.flags & flag) { + *state_id = state->id; + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id) +{ + struct pp_power_state *state; + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + + state = hwmgr->ps; + + for (i = 0; i < table_entries; i++) { + if (state->id == state_id) { + memcpy(hwmgr->request_ps, state, hwmgr->ps_size); + return 0; + } + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + return -EINVAL; +} + +int psm_set_boot_states(struct pp_hwmgr *hwmgr) +{ + unsigned long state_id; + int ret = -EINVAL; + + if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot, + &state_id)) + ret = psm_set_states(hwmgr, state_id); + + return ret; +} + +int 
psm_set_performance_states(struct pp_hwmgr *hwmgr) +{ + unsigned long state_id; + int ret = -EINVAL; + + if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance, + &state_id)) + ret = psm_set_states(hwmgr, state_id); + + return ret; +} + +int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel label_id, + struct pp_power_state *state) +{ + int table_entries; + int i; + + table_entries = hwmgr->num_ps; + state = hwmgr->ps; + +restart_search: + for (i = 0; i < table_entries; i++) { + if (state->classification.ui_label & label_id) + return 0; + state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + } + + switch (label_id) { + case PP_StateUILabel_Battery: + case PP_StateUILabel_Balanced: + label_id = PP_StateUILabel_Performance; + goto restart_search; + default: + break; + } + return -EINVAL; +} + +int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, + struct pp_power_state *new_ps) +{ + struct pp_power_state *pcurrent; + struct pp_power_state *requested; + bool equal; + + if (skip) + return 0; + + if (new_ps != NULL) + requested = new_ps; + else + requested = hwmgr->request_ps; + + pcurrent = hwmgr->current_ps; + + phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); + + if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, + &pcurrent->hardware, &requested->hardware, &equal))) + equal = false; + + if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { + phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); + memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h new file mode 100644 index 0000000000000..aa44e60ec1b6e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h @@ -0,0 +1,40 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef PP_PSM_H +#define PP_PSM_H + +#include "hwmgr.h" + +int psm_init_power_state_table(struct pp_hwmgr *hwmgr); +int psm_fini_power_state_table(struct pp_hwmgr *hwmgr); +int psm_set_boot_states(struct pp_hwmgr *hwmgr); +int psm_set_performance_states(struct pp_hwmgr *hwmgr); +int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, + enum PP_StateUILabel label_id, + struct pp_power_state *state); +int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, + bool skip, + struct pp_power_state *new_ps); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 07e9c0b5915db..f471b99f456bd 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -50,94 +50,12 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_GPU_POWER, }; -enum amd_pp_event { - AMD_PP_EVENT_INITIALIZE = 0, - AMD_PP_EVENT_UNINITIALIZE, - AMD_PP_EVENT_POWER_SOURCE_CHANGE, - AMD_PP_EVENT_SUSPEND, - AMD_PP_EVENT_RESUME, - AMD_PP_EVENT_ENTER_REST_STATE, - AMD_PP_EVENT_EXIT_REST_STATE, - AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, - AMD_PP_EVENT_THERMAL_NOTIFICATION, - AMD_PP_EVENT_VBIOS_NOTIFICATION, - AMD_PP_EVENT_ENTER_THERMAL_STATE, - AMD_PP_EVENT_EXIT_THERMAL_STATE, - AMD_PP_EVENT_ENTER_FORCED_STATE, - AMD_PP_EVENT_EXIT_FORCED_STATE, - AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE, - AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE, - AMD_PP_EVENT_ENTER_SCREEN_SAVER, - AMD_PP_EVENT_EXIT_SCREEN_SAVER, - AMD_PP_EVENT_VPU_RECOVERY_BEGIN, - AMD_PP_EVENT_VPU_RECOVERY_END, - AMD_PP_EVENT_ENABLE_POWER_PLAY, - AMD_PP_EVENT_DISABLE_POWER_PLAY, - AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL, - AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE, - AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE, - AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE, - AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE, - AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST, - AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST, - AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE, - AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE, - AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING, - AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING, - AMD_PP_EVENT_ENABLE_CGPG, - AMD_PP_EVENT_DISABLE_CGPG, - AMD_PP_EVENT_ENTER_TEXT_MODE, - AMD_PP_EVENT_EXIT_TEXT_MODE, - AMD_PP_EVENT_VIDEO_START, - AMD_PP_EVENT_VIDEO_STOP, - AMD_PP_EVENT_ENABLE_USER_STATE, - AMD_PP_EVENT_DISABLE_USER_STATE, - AMD_PP_EVENT_READJUST_POWER_STATE, - AMD_PP_EVENT_START_INACTIVITY, - AMD_PP_EVENT_STOP_INACTIVITY, - AMD_PP_EVENT_LINKED_ADAPTERS_READY, - AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE, - AMD_PP_EVENT_COMPLETE_INIT, - AMD_PP_EVENT_CRITICAL_THERMAL_FAULT, - AMD_PP_EVENT_BACKLIGHT_CHANGED, - AMD_PP_EVENT_ENABLE_VARI_BRIGHT, - AMD_PP_EVENT_DISABLE_VARI_BRIGHT, - AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS, - AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS, - AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL, - AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT, - AMD_PP_EVENT_SCREEN_ON, - AMD_PP_EVENT_SCREEN_OFF, - AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE, - AMD_PP_EVENT_ENTER_ULP_STATE, - AMD_PP_EVENT_EXIT_ULP_STATE, - AMD_PP_EVENT_REGISTER_IP_STATE, - AMD_PP_EVENT_UNREGISTER_IP_STATE, - AMD_PP_EVENT_ENTER_MGPU_MODE, - AMD_PP_EVENT_EXIT_MGPU_MODE, - AMD_PP_EVENT_ENTER_MULTI_GPU_MODE, - AMD_PP_EVENT_PRE_SUSPEND, - AMD_PP_EVENT_PRE_RESUME, - AMD_PP_EVENT_ENTER_BACOS, - AMD_PP_EVENT_EXIT_BACOS, - AMD_PP_EVENT_RESUME_BACO, - AMD_PP_EVENT_RESET_BACO, - AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS, - AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS, - AMD_PP_EVENT_START_COMPUTE_APPLICATION, - AMD_PP_EVENT_STOP_COMPUTE_APPLICATION, - 
AMD_PP_EVENT_REDUCE_POWER_LIMIT, - AMD_PP_EVENT_ENTER_FRAME_LOCK, - AMD_PP_EVENT_EXIT_FRAME_LOOCK, - AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO, - AMD_PP_EVENT_LONG_IDLE_ENTER_BACO, - AMD_PP_EVENT_LONG_IDLE_EXIT_BACO, - AMD_PP_EVENT_HIBERNATE, - AMD_PP_EVENT_CONNECTED_STANDBY, - AMD_PP_EVENT_ENTER_SELF_REFRESH, - AMD_PP_EVENT_EXIT_SELF_REFRESH, - AMD_PP_EVENT_START_AVFS_BTC, - AMD_PP_EVENT_MAX +enum amd_pp_task { + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + AMD_PP_TASK_ENABLE_USER_STATE, + AMD_PP_TASK_READJUST_POWER_STATE, + AMD_PP_TASK_COMPLETE_INIT, + AMD_PP_TASK_MAX }; struct amd_pp_init { @@ -366,7 +284,7 @@ struct amd_powerplay_funcs { int (*get_mclk)(void *handle, bool low); int (*powergate_vce)(void *handle, bool gate); int (*powergate_uvd)(void *handle, bool gate); - int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, + int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, void *input, void *output); int (*set_fan_control_mode)(void *handle, uint32_t mode); int (*get_fan_control_mode)(void *handle); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index b1a6372608de7..c649354f08ca9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -797,6 +797,11 @@ struct pp_hwmgr { extern int hwmgr_early_init(struct pp_instance *handle); extern int hwmgr_hw_init(struct pp_instance *handle); extern int hwmgr_hw_fini(struct pp_instance *handle); +extern int hwmgr_hw_suspend(struct pp_instance *handle); +extern int hwmgr_hw_resume(struct pp_instance *handle); +extern int hwmgr_handle_task(struct pp_instance *handle, + enum amd_pp_task task_id, + void *input, void *output); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h index 4c3b537a714f5..5bf2ee449e42c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -25,7 +25,6 @@ #include "smumgr.h" #include "hwmgr.h" -#include "eventmgr.h" #define PP_VALID 0x1F1F1F1F @@ -38,7 +37,6 @@ struct pp_instance { void *device; struct pp_smumgr *smu_mgr; struct pp_hwmgr *hwmgr; - struct pp_eventmgr *eventmgr; struct mutex pp_lock; }; From 47047263c52779f1f3393c32e3e53661b53a372e Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 1 Sep 2017 13:48:11 +0800 Subject: [PATCH 083/232] drm/amd/powerplay: delete eventmgr related files. 
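With the eventmgr gone, the dispatch path from the previous patch collapses
into a direct call into the hwmgr. A minimal sketch of the resulting flow
(lines taken from the hunks above; locking and error handling omitted):

	/* amdgpu side: ask powerplay to re-evaluate the power state. */
	amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE,
				 NULL, NULL);

	/* powerplay side: pp_dpm_dispatch_tasks() no longer builds a
	 * pem_event_data and walks an action chain; it simply forwards
	 * the task to the hardware manager.
	 */
	ret = hwmgr_handle_task(pp_handle, task_id, input, output);
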
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/eventmgr/Makefile | 11 - .../powerplay/eventmgr/eventactionchains.c | 291 ------------ .../powerplay/eventmgr/eventactionchains.h | 62 --- .../drm/amd/powerplay/eventmgr/eventinit.c | 195 -------- .../drm/amd/powerplay/eventmgr/eventinit.h | 34 -- .../amd/powerplay/eventmgr/eventmanagement.c | 215 --------- .../amd/powerplay/eventmgr/eventmanagement.h | 59 --- .../gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 104 ---- .../amd/powerplay/eventmgr/eventsubchains.c | 410 ---------------- .../amd/powerplay/eventmgr/eventsubchains.h | 100 ---- .../drm/amd/powerplay/eventmgr/eventtasks.c | 445 ------------------ .../drm/amd/powerplay/eventmgr/eventtasks.h | 89 ---- drivers/gpu/drm/amd/powerplay/eventmgr/psm.c | 119 ----- drivers/gpu/drm/amd/powerplay/eventmgr/psm.h | 38 -- .../gpu/drm/amd/powerplay/inc/eventmanager.h | 109 ----- drivers/gpu/drm/amd/powerplay/inc/eventmgr.h | 124 ----- 16 files changed, 2405 deletions(-) delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/Makefile delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/psm.c delete mode 100644 drivers/gpu/drm/amd/powerplay/eventmgr/psm.h delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/eventmanager.h delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/eventmgr.h diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile deleted file mode 100644 index 7509e3850087c..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# -# Makefile for the 'event manager' sub-component of powerplay. -# It provides the event management services for the driver. - -EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \ - eventactionchains.o eventsubchains.o eventtasks.o psm.o - -AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR)) - -AMD_POWERPLAY_FILES += $(AMD_PP_EVENT) - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c deleted file mode 100644 index 8cee4e0f9fde6..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "eventmgr.h" -#include "eventactionchains.h" -#include "eventsubchains.h" - -static const pem_event_action * const initialize_event[] = { - block_adjust_power_state_tasks, - power_budget_tasks, - system_config_tasks, - setup_asic_tasks, - enable_dynamic_state_management_tasks, - get_2d_performance_state_tasks, - set_performance_state_tasks, - initialize_thermal_controller_tasks, - conditionally_force_3d_performance_state_tasks, - process_vbios_eventinfo_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain initialize_action_chain = { - "Initialize", - initialize_event -}; - -static const pem_event_action * const uninitialize_event[] = { - ungate_all_display_phys_tasks, - uninitialize_display_phy_access_tasks, - disable_gfx_voltage_island_power_gating_tasks, - disable_gfx_clock_gating_tasks, - uninitialize_thermal_controller_tasks, - set_boot_state_tasks, - adjust_power_state_tasks, - disable_dynamic_state_management_tasks, - disable_clock_power_gatings_tasks, - cleanup_asic_tasks, - prepare_for_pnp_stop_tasks, - NULL -}; - -const struct action_chain uninitialize_action_chain = { - "Uninitialize", - uninitialize_event -}; - -static const pem_event_action * const power_source_change_event_pp_enabled[] = { - set_power_source_tasks, - set_power_saving_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - set_nbmcu_state_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain power_source_change_action_chain_pp_enabled = { - "Power source change - PowerPlay enabled", - power_source_change_event_pp_enabled -}; - -static const pem_event_action * const power_source_change_event_pp_disabled[] = { - set_power_source_tasks, - set_nbmcu_state_tasks, - NULL -}; - -const struct action_chain power_source_changes_action_chain_pp_disabled = { - "Power source change - PowerPlay disabled", - power_source_change_event_pp_disabled -}; - -static const pem_event_action * const power_source_change_event_hardware_dc[] = { - set_power_source_tasks, - set_power_saving_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - reset_hardware_dc_notification_tasks, - set_nbmcu_state_tasks, - broadcast_power_policy_tasks, - NULL -}; - -const struct action_chain power_source_change_action_chain_hardware_dc = { - "Power source change - with Hardware DC switching", - power_source_change_event_hardware_dc -}; - -static const pem_event_action * const 
suspend_event[] = { - reset_display_phy_access_tasks, - unregister_interrupt_tasks, - disable_gfx_voltage_island_power_gating_tasks, - disable_gfx_clock_gating_tasks, - notify_smu_suspend_tasks, - disable_smc_firmware_ctf_tasks, - set_boot_state_tasks, - adjust_power_state_tasks, - disable_fps_tasks, - vari_bright_suspend_tasks, - reset_fan_speed_to_default_tasks, - power_down_asic_tasks, - disable_stutter_mode_tasks, - set_connected_standby_tasks, - block_hw_access_tasks, - NULL -}; - -const struct action_chain suspend_action_chain = { - "Suspend", - suspend_event -}; - -static const pem_event_action * const resume_event[] = { - unblock_hw_access_tasks, - resume_connected_standby_tasks, - notify_smu_resume_tasks, - reset_display_configCounter_tasks, - update_dal_configuration_tasks, - vari_bright_resume_tasks, - setup_asic_tasks, - enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ - enable_dynamic_state_management_tasks, - enable_disable_bapm_tasks, - initialize_thermal_controller_tasks, - get_2d_performance_state_tasks, - set_performance_state_tasks, - adjust_power_state_tasks, - enable_disable_fps_tasks, - notify_hw_power_source_tasks, - process_vbios_event_info_tasks, - enable_gfx_clock_gating_tasks, - enable_gfx_voltage_island_power_gating_tasks, - reset_clock_gating_tasks, - notify_smu_vpu_recovery_end_tasks, - disable_vpu_cap_tasks, - execute_escape_sequence_tasks, - NULL -}; - - -const struct action_chain resume_action_chain = { - "resume", - resume_event -}; - -static const pem_event_action * const complete_init_event[] = { - unblock_adjust_power_state_tasks, - adjust_power_state_tasks, - enable_gfx_clock_gating_tasks, - enable_gfx_voltage_island_power_gating_tasks, - notify_power_state_change_tasks, - NULL -}; - -const struct action_chain complete_init_action_chain = { - "complete init", - complete_init_event -}; - -static const pem_event_action * const enable_gfx_clock_gating_event[] = { - enable_gfx_clock_gating_tasks, - NULL -}; - -const struct action_chain enable_gfx_clock_gating_action_chain = { - "enable gfx clock gate", - enable_gfx_clock_gating_event -}; - -static const pem_event_action * const disable_gfx_clock_gating_event[] = { - disable_gfx_clock_gating_tasks, - NULL -}; - -const struct action_chain disable_gfx_clock_gating_action_chain = { - "disable gfx clock gate", - disable_gfx_clock_gating_event -}; - -static const pem_event_action * const enable_cgpg_event[] = { - enable_cgpg_tasks, - NULL -}; - -const struct action_chain enable_cgpg_action_chain = { - "eable cg pg", - enable_cgpg_event -}; - -static const pem_event_action * const disable_cgpg_event[] = { - disable_cgpg_tasks, - NULL -}; - -const struct action_chain disable_cgpg_action_chain = { - "disable cg pg", - disable_cgpg_event -}; - - -/* Enable user _2d performance and activate */ - -static const pem_event_action * const enable_user_state_event[] = { - create_new_user_performance_state_tasks, - adjust_power_state_tasks, - NULL -}; - -const struct action_chain enable_user_state_action_chain = { - "Enable user state", - enable_user_state_event -}; - -static const pem_event_action * const enable_user_2d_performance_event[] = { - enable_user_2d_performance_tasks, - add_user_2d_performance_state_tasks, - set_performance_state_tasks, - adjust_power_state_tasks, - delete_user_2d_performance_state_tasks, - NULL -}; - -const struct action_chain enable_user_2d_performance_action_chain = { - "enable_user_2d_performance_event_activate", - enable_user_2d_performance_event -}; - - 
-static const pem_event_action * const disable_user_2d_performance_event[] = { - disable_user_2d_performance_tasks, - delete_user_2d_performance_state_tasks, - NULL -}; - -const struct action_chain disable_user_2d_performance_action_chain = { - "disable_user_2d_performance_event", - disable_user_2d_performance_event -}; - - -static const pem_event_action * const display_config_change_event[] = { - /* countDisplayConfigurationChangeEventTasks, */ - unblock_adjust_power_state_tasks, - set_cpu_power_state, - notify_hw_power_source_tasks, - get_2d_performance_state_tasks, - set_performance_state_tasks, - /* updateDALConfigurationTasks, - variBrightDisplayConfigurationChangeTasks, */ - adjust_power_state_tasks, - /*enableDisableFPSTasks, - setNBMCUStateTasks, - notifyPCIEDeviceReadyTasks,*/ - NULL -}; - -const struct action_chain display_config_change_action_chain = { - "Display configuration change", - display_config_change_event -}; - -static const pem_event_action * const readjust_power_state_event[] = { - adjust_power_state_tasks, - NULL -}; - -const struct action_chain readjust_power_state_action_chain = { - "re-adjust power state", - readjust_power_state_event -}; - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h deleted file mode 100644 index f181e53cdcda1..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef _EVENT_ACTION_CHAINS_H_ -#define _EVENT_ACTION_CHAINS_H_ -#include "eventmgr.h" - -extern const struct action_chain initialize_action_chain; - -extern const struct action_chain uninitialize_action_chain; - -extern const struct action_chain power_source_change_action_chain_pp_enabled; - -extern const struct action_chain power_source_changes_action_chain_pp_disabled; - -extern const struct action_chain power_source_change_action_chain_hardware_dc; - -extern const struct action_chain suspend_action_chain; - -extern const struct action_chain resume_action_chain; - -extern const struct action_chain complete_init_action_chain; - -extern const struct action_chain enable_gfx_clock_gating_action_chain; - -extern const struct action_chain disable_gfx_clock_gating_action_chain; - -extern const struct action_chain enable_cgpg_action_chain; - -extern const struct action_chain disable_cgpg_action_chain; - -extern const struct action_chain enable_user_2d_performance_action_chain; - -extern const struct action_chain disable_user_2d_performance_action_chain; - -extern const struct action_chain enable_user_state_action_chain; - -extern const struct action_chain readjust_power_state_action_chain; - -extern const struct action_chain display_config_change_action_chain; - -#endif /*_EVENT_ACTION_CHAINS_H_*/ - diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c deleted file mode 100644 index a3cd230d636d4..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "eventmgr.h" -#include "eventinit.h" -#include "ppinterrupt.h" -#include "hardwaremanager.h" - -void pem_init_feature_info(struct pp_eventmgr *eventmgr) -{ - - /* PowerPlay info */ - eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable = - PP_StateUILabel_Performance; - - eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label = - PP_StateUILabel_Performance; - - eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable = - PP_StateUILabel_Battery; - - eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label = - PP_StateUILabel_Battery; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) { - eventmgr->features[PP_Feature_PowerPlay].supported = true; - eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION; - eventmgr->features[PP_Feature_PowerPlay].enabled_default = true; - eventmgr->features[PP_Feature_PowerPlay].enabled = true; - } else { - eventmgr->features[PP_Feature_PowerPlay].supported = false; - eventmgr->features[PP_Feature_PowerPlay].enabled = false; - eventmgr->features[PP_Feature_PowerPlay].enabled_default = false; - } - - eventmgr->features[PP_Feature_Force3DClock].supported = true; - eventmgr->features[PP_Feature_Force3DClock].enabled = false; - eventmgr->features[PP_Feature_Force3DClock].enabled_default = false; - eventmgr->features[PP_Feature_Force3DClock].version = 1; - - /* over drive*/ - eventmgr->features[PP_Feature_User2DPerformance].version = 4; - eventmgr->features[PP_Feature_User3DPerformance].version = 4; - eventmgr->features[PP_Feature_OverdriveTest].version = 4; - - eventmgr->features[PP_Feature_OverDrive].version = 4; - eventmgr->features[PP_Feature_OverDrive].enabled = false; - eventmgr->features[PP_Feature_OverDrive].enabled_default = false; - - eventmgr->features[PP_Feature_User2DPerformance].supported = false; - eventmgr->features[PP_Feature_User2DPerformance].enabled = false; - eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false; - - eventmgr->features[PP_Feature_User3DPerformance].supported = false; - eventmgr->features[PP_Feature_User3DPerformance].enabled = false; - eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false; - - eventmgr->features[PP_Feature_OverdriveTest].supported = false; - eventmgr->features[PP_Feature_OverdriveTest].enabled = false; - eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false; - - eventmgr->features[PP_Feature_OverDrive].supported = false; - - eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1; - eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false; - - /* Multi UVD States support */ - eventmgr->features[PP_Feature_MultiUVDState].supported = false; - eventmgr->features[PP_Feature_MultiUVDState].enabled = false; - eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false; - - /* Dynamic UVD States support */ - eventmgr->features[PP_Feature_DynamicUVDState].supported = false; - eventmgr->features[PP_Feature_DynamicUVDState].enabled = false; - eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false; - - /* VCE DPM support */ - eventmgr->features[PP_Feature_VCEDPM].supported = false; - eventmgr->features[PP_Feature_VCEDPM].enabled = false; - eventmgr->features[PP_Feature_VCEDPM].enabled_default = false; - - /* ACP PowerGating support */ - 
eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false; - eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false; - eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false; - - /* PPM support */ - eventmgr->features[PP_Feature_PPM].version = 1; - eventmgr->features[PP_Feature_PPM].supported = false; - eventmgr->features[PP_Feature_PPM].enabled = false; - - /* FFC support (enables fan and temp settings, Gemini needs temp settings) */ - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) || - phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) { - eventmgr->features[PP_Feature_FFC].version = 1; - eventmgr->features[PP_Feature_FFC].supported = true; - eventmgr->features[PP_Feature_FFC].enabled = true; - eventmgr->features[PP_Feature_FFC].enabled_default = true; - } else { - eventmgr->features[PP_Feature_FFC].supported = false; - eventmgr->features[PP_Feature_FFC].enabled = false; - eventmgr->features[PP_Feature_FFC].enabled_default = false; - } - - eventmgr->features[PP_Feature_VariBright].supported = false; - eventmgr->features[PP_Feature_VariBright].enabled = false; - eventmgr->features[PP_Feature_VariBright].enabled_default = false; - - eventmgr->features[PP_Feature_BACO].supported = false; - eventmgr->features[PP_Feature_BACO].supported = false; - eventmgr->features[PP_Feature_BACO].enabled_default = false; - - /* PowerDown feature support */ - eventmgr->features[PP_Feature_PowerDown].supported = false; - eventmgr->features[PP_Feature_PowerDown].enabled = false; - eventmgr->features[PP_Feature_PowerDown].enabled_default = false; - - eventmgr->features[PP_Feature_FPS].version = 1; - eventmgr->features[PP_Feature_FPS].supported = false; - eventmgr->features[PP_Feature_FPS].enabled_default = false; - eventmgr->features[PP_Feature_FPS].enabled = false; - - eventmgr->features[PP_Feature_ViPG].version = 1; - eventmgr->features[PP_Feature_ViPG].supported = false; - eventmgr->features[PP_Feature_ViPG].enabled_default = false; - eventmgr->features[PP_Feature_ViPG].enabled = false; -} - -static int thermal_interrupt_callback(void *private_data, - unsigned src_id, const uint32_t *iv_entry) -{ - /* TO DO hanle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/ - pr_info("current thermal is out of range \n"); - return 0; -} - -int pem_register_interrupts(struct pp_eventmgr *eventmgr) -{ - int result = 0; - struct pp_interrupt_registration_info info; - - info.call_back = thermal_interrupt_callback; - info.context = eventmgr; - - result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info); - - /* TODO: - * 2. Register CTF event interrupt - * 3. Register for vbios events interrupt - * 4. Register External Throttle Interrupt - * 5. 
Register Smc To Host Interrupt - * */ - return result; -} - - -int pem_unregister_interrupts(struct pp_eventmgr *eventmgr) -{ - return 0; -} - - -void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr) -{ - eventmgr->features[PP_Feature_MultiUVDState].supported = false; - eventmgr->features[PP_Feature_VariBright].supported = false; - eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false; - eventmgr->features[PP_Feature_OverDrive].supported = false; - eventmgr->features[PP_Feature_OverdriveTest].supported = false; - eventmgr->features[PP_Feature_User3DPerformance].supported = false; - eventmgr->features[PP_Feature_User2DPerformance].supported = false; - eventmgr->features[PP_Feature_PowerPlay].supported = false; - eventmgr->features[PP_Feature_Force3DClock].supported = false; -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h deleted file mode 100644 index 9ef96aab3f24f..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _EVENTINIT_H_ -#define _EVENTINIT_H_ - -#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4 - -void pem_init_feature_info(struct pp_eventmgr *eventmgr); -void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr); -int pem_register_interrupts(struct pp_eventmgr *eventmgr); -int pem_unregister_interrupts(struct pp_eventmgr *eventmgr); - -#endif /* _EVENTINIT_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c deleted file mode 100644 index cd1ca07ef7f72..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "eventmanagement.h" -#include "eventmgr.h" -#include "eventactionchains.h" - -int pem_init_event_action_chains(struct pp_eventmgr *eventmgr) -{ - int i; - - for (i = 0; i < AMD_PP_EVENT_MAX; i++) - eventmgr->event_chain[i] = NULL; - - eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr); - eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr); - return 0; -} - -int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct 
action_chain *event_chain, struct pem_event_data *event_data) -{ - const pem_event_action * const *paction_chain; - const pem_event_action *psub_chain; - int tmp_result = 0; - int result = 0; - - if (eventmgr == NULL || event_chain == NULL || event_data == NULL) - return -EINVAL; - - for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) { - if (0 != result) - return result; - - for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) { - tmp_result = (*psub_chain)(eventmgr, event_data); - if (0 == result) - result = tmp_result; - } - } - - return result; -} - -const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr) -{ - return &suspend_action_chain; -} - -const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr) -{ - return &initialize_action_chain; -} - -const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr) -{ - return &uninitialize_action_chain; -} - -const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr) -{ - return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/ -} - -const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr) -{ - return &resume_action_chain; -} - -const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_gfx_clock_gating_action_chain; -} - -const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr) -{ - return &disable_gfx_clock_gating_action_chain; -} - -const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_cgpg_action_chain; -} - -const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr) -{ - return &disable_cgpg_action_chain; -} - -const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr) -{ - return &complete_init_action_chain; -} - -const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain 
*pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr) -{ - return NULL; -} - -const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return &enable_user_state_action_chain; -} - -const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr) -{ - return &readjust_power_state_action_chain; -} - -const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr) -{ - return &display_config_change_action_chain; -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h deleted file mode 100644 index 383d4b295aa92..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef _EVENT_MANAGEMENT_H_ -#define _EVENT_MANAGEMENT_H_ - -#include "eventmgr.h" - -int pem_init_event_action_chains(struct pp_eventmgr *eventmgr); -int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data); -const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr); - -extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr); -extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr); -const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr); - - -#endif /* _EVENT_MANAGEMENT_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c deleted file mode 100644 index 3e3ca03bd3445..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include -#include -#include -#include "eventmgr.h" -#include "hwmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" - -static int pem_init(struct pp_eventmgr *eventmgr) -{ - int result = 0; - struct pem_event_data event_data = { {0} }; - - /* Initialize PowerPlay feature info */ - pem_init_feature_info(eventmgr); - - /* Initialize event action chains */ - pem_init_event_action_chains(eventmgr); - - /* Call initialization event */ - result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data); - - /* if (0 != result) - return result; */ - - /* Register interrupt callback functions */ - result = pem_register_interrupts(eventmgr); - return 0; -} - -static void pem_fini(struct pp_eventmgr *eventmgr) -{ - struct pem_event_data event_data = { {0} }; - - pem_uninit_featureInfo(eventmgr); - pem_unregister_interrupts(eventmgr); - - pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); -} - -int eventmgr_early_init(struct pp_instance *handle) -{ - struct pp_eventmgr *eventmgr; - - if (handle == NULL) - return -EINVAL; - - eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL); - if (eventmgr == NULL) - return -ENOMEM; - - eventmgr->hwmgr = handle->hwmgr; - handle->eventmgr = eventmgr; - - eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor); - eventmgr->pp_eventmgr_init = pem_init; - eventmgr->pp_eventmgr_fini = pem_fini; - - return 0; -} - -static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data) -{ - if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL) - return -EINVAL; - - return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data); -} - -int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data) -{ - int r = 0; - - r = pem_handle_event_unlocked(eventmgr, event, event_data); - - return r; -} - -bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr) -{ - return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr)); -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c deleted file mode 100644 index b82c43af59ab3..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "eventmgr.h" -#include "eventsubchains.h" -#include "eventtasks.h" -#include "hardwaremanager.h" - -const pem_event_action reset_display_phy_access_tasks[] = { - pem_task_reset_display_phys_access, - NULL -}; - -const pem_event_action broadcast_power_policy_tasks[] = { - /* PEM_Task_BroadcastPowerPolicyChange, */ - NULL -}; - -const pem_event_action unregister_interrupt_tasks[] = { - pem_task_unregister_interrupts, - NULL -}; - -/* Disable GFX Voltage Islands Power Gating */ -const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = { - pem_task_disable_voltage_island_power_gating, - NULL -}; - -const pem_event_action disable_gfx_clockgating_tasks[] = { - pem_task_disable_gfx_clock_gating, - NULL -}; - -const pem_event_action block_adjust_power_state_tasks[] = { - pem_task_block_adjust_power_state, - NULL -}; - - -const pem_event_action unblock_adjust_power_state_tasks[] = { - pem_task_unblock_adjust_power_state, - NULL -}; - -const pem_event_action set_performance_state_tasks[] = { - pem_task_set_performance_state, - NULL -}; - -const pem_event_action get_2d_performance_state_tasks[] = { - pem_task_get_2D_performance_state_id, - NULL -}; - -const pem_event_action conditionally_force3D_performance_state_tasks[] = { - pem_task_conditionally_force_3d_performance_state, - NULL -}; - -const pem_event_action process_vbios_eventinfo_tasks[] = { - /* PEM_Task_ProcessVbiosEventInfo,*/ - NULL -}; - -const pem_event_action enable_dynamic_state_management_tasks[] = { - /* PEM_Task_ResetBAPMPolicyChangedFlag,*/ - pem_task_get_boot_state_id, - pem_task_enable_dynamic_state_management, - pem_task_register_interrupts, - NULL -}; - -const pem_event_action enable_clock_power_gatings_tasks[] = { - pem_task_enable_clock_power_gatings_tasks, - pem_task_powerdown_uvd_tasks, - pem_task_powerdown_vce_tasks, - NULL -}; - -const pem_event_action setup_asic_tasks[] = { - pem_task_setup_asic, - NULL -}; - -const pem_event_action power_budget_tasks[] = { - /* TODO - * PEM_Task_PowerBudgetWaiverAvailable, - * PEM_Task_PowerBudgetWarningMessage, - * PEM_Task_PruneStatesBasedOnPowerBudget, - */ - NULL -}; - -const pem_event_action system_config_tasks[] = { - /* PEM_Task_PruneStatesBasedOnSystemConfig,*/ - NULL -}; - - -const pem_event_action conditionally_force_3d_performance_state_tasks[] = { - pem_task_conditionally_force_3d_performance_state, - NULL -}; - -const pem_event_action 
ungate_all_display_phys_tasks[] = { - /* PEM_Task_GetDisplayPhyAccessInfo */ - NULL -}; - -const pem_event_action uninitialize_display_phy_access_tasks[] = { - /* PEM_Task_UninitializeDisplayPhysAccess, */ - NULL -}; - -const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = { - /* PEM_Task_DisableVoltageIslandPowerGating, */ - NULL -}; - -const pem_event_action disable_gfx_clock_gating_tasks[] = { - pem_task_disable_gfx_clock_gating, - NULL -}; - -const pem_event_action set_boot_state_tasks[] = { - pem_task_get_boot_state_id, - pem_task_set_boot_state, - NULL -}; - -const pem_event_action adjust_power_state_tasks[] = { - pem_task_notify_hw_mgr_display_configuration_change, - pem_task_adjust_power_state, - pem_task_notify_smc_display_config_after_power_state_adjustment, - pem_task_update_allowed_performance_levels, - /* to do pem_task_Enable_disable_bapm, */ - NULL -}; - -const pem_event_action disable_dynamic_state_management_tasks[] = { - pem_task_unregister_interrupts, - pem_task_get_boot_state_id, - pem_task_disable_dynamic_state_management, - NULL -}; - -const pem_event_action disable_clock_power_gatings_tasks[] = { - pem_task_disable_clock_power_gatings_tasks, - NULL -}; - -const pem_event_action cleanup_asic_tasks[] = { - /* PEM_Task_DisableFPS,*/ - pem_task_cleanup_asic, - NULL -}; - -const pem_event_action prepare_for_pnp_stop_tasks[] = { - /* PEM_Task_PrepareForPnpStop,*/ - NULL -}; - -const pem_event_action set_power_source_tasks[] = { - pem_task_set_power_source, - pem_task_notify_hw_of_power_source, - NULL -}; - -const pem_event_action set_power_saving_state_tasks[] = { - pem_task_reset_power_saving_state, - pem_task_get_power_saving_state, - pem_task_set_power_saving_state, - /* PEM_Task_ResetODDCState, - * PEM_Task_GetODDCState, - * PEM_Task_SetODDCState,*/ - NULL -}; - -const pem_event_action enable_disable_fps_tasks[] = { - /* PEM_Task_EnableDisableFPS,*/ - NULL -}; - -const pem_event_action set_nbmcu_state_tasks[] = { - /* PEM_Task_NBMCUStateChange,*/ - NULL -}; - -const pem_event_action reset_hardware_dc_notification_tasks[] = { - /* PEM_Task_ResetHardwareDCNotification,*/ - NULL -}; - - -const pem_event_action notify_smu_suspend_tasks[] = { - /* PEM_Task_NotifySMUSuspend,*/ - NULL -}; - -const pem_event_action disable_smc_firmware_ctf_tasks[] = { - pem_task_disable_smc_firmware_ctf, - NULL -}; - -const pem_event_action disable_fps_tasks[] = { - /* PEM_Task_DisableFPS,*/ - NULL -}; - -const pem_event_action vari_bright_suspend_tasks[] = { - /* PEM_Task_VariBright_Suspend,*/ - NULL -}; - -const pem_event_action reset_fan_speed_to_default_tasks[] = { - /* PEM_Task_ResetFanSpeedToDefault,*/ - NULL -}; - -const pem_event_action power_down_asic_tasks[] = { - /* PEM_Task_DisableFPS,*/ - pem_task_power_down_asic, - NULL -}; - -const pem_event_action disable_stutter_mode_tasks[] = { - /* PEM_Task_DisableStutterMode,*/ - NULL -}; - -const pem_event_action set_connected_standby_tasks[] = { - /* PEM_Task_SetConnectedStandby,*/ - NULL -}; - -const pem_event_action block_hw_access_tasks[] = { - pem_task_block_hw_access, - NULL -}; - -const pem_event_action unblock_hw_access_tasks[] = { - pem_task_un_block_hw_access, - NULL -}; - -const pem_event_action resume_connected_standby_tasks[] = { - /* PEM_Task_ResumeConnectedStandby,*/ - NULL -}; - -const pem_event_action notify_smu_resume_tasks[] = { - /* PEM_Task_NotifySMUResume,*/ - NULL -}; - -const pem_event_action reset_display_configCounter_tasks[] = { - pem_task_reset_display_phys_access, - NULL -}; - -const 
pem_event_action update_dal_configuration_tasks[] = { - /* PEM_Task_CheckVBlankTime,*/ - NULL -}; - -const pem_event_action vari_bright_resume_tasks[] = { - /* PEM_Task_VariBright_Resume,*/ - NULL -}; - -const pem_event_action notify_hw_power_source_tasks[] = { - pem_task_notify_hw_of_power_source, - NULL -}; - -const pem_event_action process_vbios_event_info_tasks[] = { - /* PEM_Task_ProcessVbiosEventInfo,*/ - NULL -}; - -const pem_event_action enable_gfx_clock_gating_tasks[] = { - pem_task_enable_gfx_clock_gating, - NULL -}; - -const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = { - pem_task_enable_voltage_island_power_gating, - NULL -}; - -const pem_event_action reset_clock_gating_tasks[] = { - /* PEM_Task_ResetClockGating*/ - NULL -}; - -const pem_event_action notify_smu_vpu_recovery_end_tasks[] = { - /* PEM_Task_NotifySmuVPURecoveryEnd,*/ - NULL -}; - -const pem_event_action disable_vpu_cap_tasks[] = { - /* PEM_Task_DisableVPUCap,*/ - NULL -}; - -const pem_event_action execute_escape_sequence_tasks[] = { - /* PEM_Task_ExecuteEscapesequence,*/ - NULL -}; - -const pem_event_action notify_power_state_change_tasks[] = { - pem_task_notify_power_state_change, - NULL -}; - -const pem_event_action enable_cgpg_tasks[] = { - pem_task_enable_cgpg, - NULL -}; - -const pem_event_action disable_cgpg_tasks[] = { - pem_task_disable_cgpg, - NULL -}; - -const pem_event_action enable_user_2d_performance_tasks[] = { - /* PEM_Task_SetUser2DPerformanceFlag,*/ - /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/ - NULL -}; - -const pem_event_action add_user_2d_performance_state_tasks[] = { - /* PEM_Task_Get2DPerformanceTemplate,*/ - /* PEM_Task_AllocateNewPowerStateMemory,*/ - /* PEM_Task_CopyNewPowerStateInfo,*/ - /* PEM_Task_UpdateNewPowerStateClocks,*/ - /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/ - /* PEM_Task_AddPowerState,*/ - /* PEM_Task_ReleaseNewPowerStateMemory,*/ - NULL -}; - -const pem_event_action delete_user_2d_performance_state_tasks[] = { - /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/ - /* PEM_Task_DeletePowerState,*/ - /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/ - NULL -}; - -const pem_event_action disable_user_2d_performance_tasks[] = { - /* PEM_Task_ResetUser2DPerformanceFlag,*/ - /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/ - NULL -}; - -const pem_event_action enable_stutter_mode_tasks[] = { - pem_task_enable_stutter_mode, - NULL -}; - -const pem_event_action enable_disable_bapm_tasks[] = { - /*PEM_Task_EnableDisableBAPM,*/ - NULL -}; - -const pem_event_action reset_boot_state_tasks[] = { - pem_task_reset_boot_state, - NULL -}; - -const pem_event_action create_new_user_performance_state_tasks[] = { - pem_task_create_user_performance_state, - NULL -}; - -const pem_event_action initialize_thermal_controller_tasks[] = { - pem_task_initialize_thermal_controller, - NULL -}; - -const pem_event_action uninitialize_thermal_controller_tasks[] = { - pem_task_uninitialize_thermal_controller, - NULL -}; - -const pem_event_action set_cpu_power_state[] = { - pem_task_set_cpu_power_state, - NULL -}; \ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h deleted file mode 100644 index 7714cb927428e..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _EVENT_SUB_CHAINS_H_ -#define _EVENT_SUB_CHAINS_H_ - -#include "eventmgr.h" - -extern const pem_event_action reset_display_phy_access_tasks[]; -extern const pem_event_action broadcast_power_policy_tasks[]; -extern const pem_event_action unregister_interrupt_tasks[]; -extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[]; -extern const pem_event_action disable_GFX_clockgating_tasks[]; -extern const pem_event_action block_adjust_power_state_tasks[]; -extern const pem_event_action unblock_adjust_power_state_tasks[]; -extern const pem_event_action set_performance_state_tasks[]; -extern const pem_event_action get_2D_performance_state_tasks[]; -extern const pem_event_action conditionally_force3D_performance_state_tasks[]; -extern const pem_event_action process_vbios_eventinfo_tasks[]; -extern const pem_event_action enable_dynamic_state_management_tasks[]; -extern const pem_event_action enable_clock_power_gatings_tasks[]; -extern const pem_event_action conditionally_force3D_performance_state_tasks[]; -extern const pem_event_action setup_asic_tasks[]; -extern const pem_event_action power_budget_tasks[]; -extern const pem_event_action system_config_tasks[]; -extern const pem_event_action get_2d_performance_state_tasks[]; -extern const pem_event_action conditionally_force_3d_performance_state_tasks[]; -extern const pem_event_action ungate_all_display_phys_tasks[]; -extern const pem_event_action uninitialize_display_phy_access_tasks[]; -extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[]; -extern const pem_event_action disable_gfx_clock_gating_tasks[]; -extern const pem_event_action set_boot_state_tasks[]; -extern const pem_event_action adjust_power_state_tasks[]; -extern const pem_event_action disable_dynamic_state_management_tasks[]; -extern const pem_event_action disable_clock_power_gatings_tasks[]; -extern const pem_event_action cleanup_asic_tasks[]; -extern const pem_event_action prepare_for_pnp_stop_tasks[]; -extern const pem_event_action set_power_source_tasks[]; -extern const pem_event_action set_power_saving_state_tasks[]; -extern const pem_event_action enable_disable_fps_tasks[]; -extern const pem_event_action set_nbmcu_state_tasks[]; -extern const pem_event_action reset_hardware_dc_notification_tasks[]; -extern const pem_event_action notify_smu_suspend_tasks[]; -extern const pem_event_action disable_smc_firmware_ctf_tasks[]; -extern const 
pem_event_action disable_fps_tasks[]; -extern const pem_event_action vari_bright_suspend_tasks[]; -extern const pem_event_action reset_fan_speed_to_default_tasks[]; -extern const pem_event_action power_down_asic_tasks[]; -extern const pem_event_action disable_stutter_mode_tasks[]; -extern const pem_event_action set_connected_standby_tasks[]; -extern const pem_event_action block_hw_access_tasks[]; -extern const pem_event_action unblock_hw_access_tasks[]; -extern const pem_event_action resume_connected_standby_tasks[]; -extern const pem_event_action notify_smu_resume_tasks[]; -extern const pem_event_action reset_display_configCounter_tasks[]; -extern const pem_event_action update_dal_configuration_tasks[]; -extern const pem_event_action vari_bright_resume_tasks[]; -extern const pem_event_action notify_hw_power_source_tasks[]; -extern const pem_event_action process_vbios_event_info_tasks[]; -extern const pem_event_action enable_gfx_clock_gating_tasks[]; -extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[]; -extern const pem_event_action reset_clock_gating_tasks[]; -extern const pem_event_action notify_smu_vpu_recovery_end_tasks[]; -extern const pem_event_action disable_vpu_cap_tasks[]; -extern const pem_event_action execute_escape_sequence_tasks[]; -extern const pem_event_action notify_power_state_change_tasks[]; -extern const pem_event_action enable_cgpg_tasks[]; -extern const pem_event_action disable_cgpg_tasks[]; -extern const pem_event_action enable_user_2d_performance_tasks[]; -extern const pem_event_action add_user_2d_performance_state_tasks[]; -extern const pem_event_action delete_user_2d_performance_state_tasks[]; -extern const pem_event_action disable_user_2d_performance_tasks[]; -extern const pem_event_action enable_stutter_mode_tasks[]; -extern const pem_event_action enable_disable_bapm_tasks[]; -extern const pem_event_action reset_boot_state_tasks[]; -extern const pem_event_action create_new_user_performance_state_tasks[]; -extern const pem_event_action initialize_thermal_controller_tasks[]; -extern const pem_event_action uninitialize_thermal_controller_tasks[]; -extern const pem_event_action set_cpu_power_state[]; -#endif /* _EVENT_SUB_CHAINS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c deleted file mode 100644 index 8c4ebaae1e0ca..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "eventmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" -#include "eventmanager.h" -#include "hardwaremanager.h" -#include "eventtasks.h" -#include "power_state.h" -#include "hwmgr.h" -#include "amd_powerplay.h" -#include "psm.h" - -#define TEMP_RANGE_MIN (90 * 1000) -#define TEMP_RANGE_MAX (120 * 1000) - -int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - - if (eventmgr == NULL || eventmgr->hwmgr == NULL) - return -EINVAL; - - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level); - - return 0; -} - -/* eventtasks_generic.c */ -int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct pp_hwmgr *hwmgr; - - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - hwmgr = eventmgr->hwmgr; - if (event_data->pnew_power_state != NULL) - hwmgr->request_ps = event_data->pnew_power_state; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) - psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules); - else - psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules); - - return 0; -} - -int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_power_down_asic(eventmgr->hwmgr); -} - -int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) - return psm_set_states(eventmgr, &(event_data->requested_state_id)); - - return 0; -} - -int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return pem_unregister_interrupts(eventmgr); -} - -int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - int result; - - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_Boot, - &(event_data->requested_state_id) - ); - - if (0 == result) - pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - else - pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - - return result; -} - -int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_enable_dynamic_state_management(eventmgr->hwmgr); -} - -int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_disable_dynamic_state_management(eventmgr->hwmgr); -} - -int 
pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_enable_clock_power_gatings(eventmgr->hwmgr); -} - -int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_powerdown_uvd(eventmgr->hwmgr); -} - -int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - phm_powergate_uvd(eventmgr->hwmgr, true); - phm_powergate_vce(eventmgr->hwmgr, true); - return 0; -} - -int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - phm_disable_clock_power_gatings(eventmgr->hwmgr); - return 0; -} - -int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_disable_smc_firmware_ctf(eventmgr->hwmgr); -} - -int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_setup_asic(eventmgr->hwmgr); -} - -int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config) -{ - /* TODO */ - return 0; - /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */ -} - -int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - return phm_display_configuration_changed(eventmgr->hwmgr); -} - -int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return 0; -} - -int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_hw_access_blocked(eventmgr)) - return 0; - - return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr); -} - -int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - eventmgr->block_adjust_power_state = true; - /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/ - return 0; -} - -int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - eventmgr->block_adjust_power_state = false; - return 0; -} - -int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_set_cpu_power_state(eventmgr->hwmgr); -} - -/*powersaving*/ - -int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - 
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - - -int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - - -/* performance */ -int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID)) - return psm_set_states(eventmgr, &(event_data->requested_state_id)); - - return 0; -} - -int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - /* TODO */ - return 0; -} - -int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - int result; - - if (eventmgr->features[PP_Feature_PowerPlay].supported && - !(eventmgr->features[PP_Feature_PowerPlay].enabled)) - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_Boot, - &(event_data->requested_state_id)); - else if (eventmgr->features[PP_Feature_User2DPerformance].enabled) - result = psm_get_state_by_classification(eventmgr, - PP_StateClassificationFlag_User2DPerformance, - &(event_data->requested_state_id)); - else - result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance, - &(event_data->requested_state_id)); - - if (0 == result) - pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - else - pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID); - - return result; -} - -int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = 
hwmgr->ps; - -restart_search: - for (i = 0; i < table_entries; i++) { - if (state->classification.ui_label & event_data->requested_ui_label) { - event_data->pnew_power_state = state; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - - switch (event_data->requested_ui_label) { - case PP_StateUILabel_Battery: - case PP_StateUILabel_Balanced: - event_data->requested_ui_label = PP_StateUILabel_Performance; - goto restart_search; - default: - break; - } - return -1; -} - -int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - struct PP_TemperatureRange range; - - range.max = TEMP_RANGE_MAX; - range.min = TEMP_RANGE_MIN; - - if (eventmgr == NULL || eventmgr->platform_descriptor == NULL) - return -EINVAL; - - if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController)) - return phm_start_thermal_controller(eventmgr->hwmgr, &range); - - return 0; -} - -int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) -{ - return phm_stop_thermal_controller(eventmgr->hwmgr); -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h deleted file mode 100644 index 37e7ca5a58e0a..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef _EVENT_TASKS_H_ -#define _EVENT_TASKS_H_ -#include "eventmgr.h" - -struct amd_display_configuration; - -/* eventtasks_generic.c */ -int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config); -int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -/*powersaving*/ - -int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int 
pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); - -/* performance */ -int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -/*thermal */ -int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); -int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data); - -#endif /* _EVENT_TASKS_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c deleted file mode 100644 index 489908887e9c0..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "psm.h" - -int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->classification.ui_label & ui_label) { - *state_id = state->id; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->classification.flags & flag) { - *state_id = state->id; - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id) -{ - struct pp_power_state *state; - int table_entries; - struct pp_hwmgr *hwmgr = eventmgr->hwmgr; - int i; - - table_entries = hwmgr->num_ps; - - state = hwmgr->ps; - - for (i = 0; i < table_entries; i++) { - if (state->id == *state_id) { - memcpy(hwmgr->request_ps, state, hwmgr->ps_size); - return 0; - } - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); - } - return -1; -} - -int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip) -{ - - struct pp_power_state *pcurrent; - struct pp_power_state *requested; - struct pp_hwmgr *hwmgr; - bool equal; - - if (skip) - return 0; - - hwmgr = eventmgr->hwmgr; - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - if (requested == NULL) - return 0; - - phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); - - if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) - equal = false; - - if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { - phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); - memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); - } - return 0; -} - -int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip) -{ - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h deleted file mode 100644 index fbdff3e02aa39..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "eventmgr.h" -#include "eventinit.h" -#include "eventmanagement.h" -#include "eventmanager.h" -#include "power_state.h" -#include "hardwaremanager.h" - -int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id); - -int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id); - -int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id); - -int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip); - -int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip); diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h deleted file mode 100644 index b9d84de8a44d5..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef _EVENT_MANAGER_H_ -#define _EVENT_MANAGER_H_ - -#include "power_state.h" -#include "pp_power_source.h" -#include "hardwaremanager.h" -#include "pp_asicblocks.h" - -struct pp_eventmgr; -enum amd_pp_event; - -enum PEM_EventDataValid { - PEM_EventDataValid_RequestedStateID = 0, - PEM_EventDataValid_RequestedUILabel, - PEM_EventDataValid_NewPowerState, - PEM_EventDataValid_RequestedPowerSource, - PEM_EventDataValid_RequestedClocks, - PEM_EventDataValid_CurrentTemperature, - PEM_EventDataValid_AsicBlocks, - PEM_EventDataValid_ODParameters, - PEM_EventDataValid_PXAdapterPrefs, - PEM_EventDataValid_PXUserPrefs, - PEM_EventDataValid_PXSwitchReason, - PEM_EventDataValid_PXSwitchPhase, - PEM_EventDataValid_HdVideo, - PEM_EventDataValid_BacklightLevel, - PEM_EventDatavalid_VariBrightParams, - PEM_EventDataValid_VariBrightLevel, - PEM_EventDataValid_VariBrightImmediateChange, - PEM_EventDataValid_PercentWhite, - PEM_EventDataValid_SdVideo, - PEM_EventDataValid_HTLinkChangeReason, - PEM_EventDataValid_HWBlocks, - PEM_EventDataValid_RequestedThermalState, - PEM_EventDataValid_MvcVideo, - PEM_EventDataValid_Max -}; - -typedef enum PEM_EventDataValid PEM_EventDataValid; - -/* Number of bits in ULONG variable */ -#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8) - -/* Number of ULONG entries used by event data valid bits */ -#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \ - ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \ - PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD) - -static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) -{ - fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |= - (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field) -{ - fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &= - ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field) -{ - return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] & - (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)); -} - -struct pem_event_data { - unsigned long valid_fields[100]; - unsigned long requested_state_id; - enum PP_StateUILabel requested_ui_label; - struct pp_power_state *pnew_power_state; - enum pp_power_source requested_power_source; - struct PP_Clocks requested_clocks; - bool skip_state_adjust_rules; - struct phm_asic_blocks asic_blocks; - /* to doPP_ThermalState requestedThermalState; - enum ThermalStateRequestSrc requestThermalStateSrc; - PP_Temperature currentTemperature;*/ - -}; - -int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, - struct pem_event_data *event_data); - -bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr); - -#endif /* _EVENT_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h deleted file mode 100644 index 7bd8a7e570808..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _EVENTMGR_H_ -#define _EVENTMGR_H_ - -#include -#include "pp_instance.h" -#include "hardwaremanager.h" -#include "eventmanager.h" -#include "pp_feature.h" -#include "pp_power_source.h" -#include "power_state.h" - -typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr, - struct pem_event_data *event_data); - -struct action_chain { - const char *description; /* action chain description for debugging purpose */ - const pem_event_action * const *action_chain; /* pointer to chain of event actions */ -}; - -struct pem_power_source_ui_state_info { - enum PP_StateUILabel current_ui_label; - enum PP_StateUILabel default_ui_lable; - unsigned long configurable_ui_mapping; -}; - -struct pp_clock_range { - uint32_t min_sclk_khz; - uint32_t max_sclk_khz; - - uint32_t min_mclk_khz; - uint32_t max_mclk_khz; - - uint32_t min_vclk_khz; - uint32_t max_vclk_khz; - - uint32_t min_dclk_khz; - uint32_t max_dclk_khz; - - uint32_t min_aclk_khz; - uint32_t max_aclk_khz; - - uint32_t min_eclk_khz; - uint32_t max_eclk_khz; -}; - -enum pp_state { - UNINITIALIZED, - INACTIVE, - ACTIVE -}; - -enum pp_ring_index { - PP_RING_TYPE_GFX_INDEX = 0, - PP_RING_TYPE_DMA_INDEX, - PP_RING_TYPE_DMA1_INDEX, - PP_RING_TYPE_UVD_INDEX, - PP_RING_TYPE_VCE0_INDEX, - PP_RING_TYPE_VCE1_INDEX, - PP_RING_TYPE_CP1_INDEX, - PP_RING_TYPE_CP2_INDEX, - PP_NUM_RINGS, -}; - -struct pp_request { - uint32_t flags; - uint32_t sclk; - uint32_t sclk_throttle; - uint32_t mclk; - uint32_t vclk; - uint32_t dclk; - uint32_t eclk; - uint32_t aclk; - uint32_t iclk; - uint32_t vp8clk; - uint32_t rsv[32]; -}; - -struct pp_eventmgr { - struct pp_hwmgr *hwmgr; - struct pp_smumgr *smumgr; - - struct pp_feature_info features[PP_Feature_Max]; - const struct action_chain *event_chain[AMD_PP_EVENT_MAX]; - struct phm_platform_descriptor *platform_descriptor; - struct pp_clock_range clock_range; - enum pp_power_source current_power_source; - struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max]; - enum pp_state states[PP_NUM_RINGS]; - struct pp_request hi_req; - struct list_head context_list; - struct mutex lock; - bool block_adjust_power_state; - bool enable_cg; - bool enable_gfx_cgpg; - int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr); - void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr); -}; - -int eventmgr_early_init(struct pp_instance *handle); - -#endif /* _EVENTMGR_H_ */ From 9947f7047f2b57735fa5c76d63728642a1850527 Mon Sep 17 
00:00:00 2001 From: Rex Zhu Date: Tue, 29 Aug 2017 16:08:56 +0800 Subject: [PATCH 084/232] drm/amd/powerplay: add UMD P-state in powerplay. This feature is for UMD to run benchmark in a power state that is as steady as possible. kmd need to fix the power state as stable as possible. now, kmd support four level: profile_standard,peak,min_sclk,min_mclk move common related code to amd_powerplay.c Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 47 ++++++++++++++- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 36 ------------ .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 58 ++++--------------- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 53 +++-------------- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 4 +- 5 files changed, 66 insertions(+), 132 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 94bed3c08161b..75c810f93e9e0 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -30,7 +30,6 @@ #include "pp_instance.h" #include "power_state.h" - static inline int pp_check(struct pp_instance *handle) { if (handle == NULL || handle->pp_valid != PP_VALID) @@ -287,6 +286,42 @@ static int pp_dpm_fw_loading_complete(void *handle) return 0; } +static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level *level) +{ + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter umd pstate, save current level, disable gfx cg*/ + if (*level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + hwmgr->en_umd_pstate = true; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); + } + } else { + /* exit umd pstate, restore level, enable gfx cg*/ + if (!(*level & profile_mode_mask)) { + if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + *level = hwmgr->saved_dpm_level; + hwmgr->en_umd_pstate = false; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + } + } +} + static int pp_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { @@ -301,14 +336,22 @@ static int pp_dpm_force_performance_level(void *handle, hwmgr = pp_handle->hwmgr; + if (level == hwmgr->dpm_level) + return 0; + if (hwmgr->hwmgr_func->force_dpm_level == NULL) { pr_info("%s was not implemented.\n", __func__); return 0; } mutex_lock(&pp_handle->pp_lock); + pp_dpm_en_umd_pstate(hwmgr, &level); + hwmgr->request_dpm_level = level; hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); - hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + if (!ret) + hwmgr->dpm_level = hwmgr->request_dpm_level; + mutex_unlock(&pp_handle->pp_lock); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index b9c61ece67840..a301003f6e2a1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1314,57 +1314,21 @@ static int cz_dpm_force_dpm_level(struct 
pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { int ret = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ret = cz_phm_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: ret = cz_phm_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = cz_phm_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_MANUAL: - hwmgr->dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index eb8a3ff70cf78..dfe06d98304ce 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2568,51 +2568,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t pcie_mask = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = smu7_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: ret = smu7_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = smu7_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -2621,26 +2586,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); if (ret) return ret; - hwmgr->dpm_level = level; smu7_force_clock_level(hwmgr, PP_SCLK, 1<dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: 
break; } - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; + if (!ret) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); + } + return ret; } static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) @@ -4245,9 +4207,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | - AMD_DPM_FORCED_LEVEL_LOW | - AMD_DPM_FORCED_LEVEL_HIGH)) + if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f8f02e70b8bc0..2e776edf9b8d5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4306,51 +4306,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t soc_mask = 0; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - - if (level == hwmgr->dpm_level) - return ret; - - if (!(hwmgr->dpm_level & profile_mode_mask)) { - /* enter profile mode, save current level, disable gfx cg*/ - if (level & profile_mode_mask) { - hwmgr->saved_dpm_level = hwmgr->dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } - } else { - /* exit profile mode, restore level, enable gfx cg*/ - if (!(level & profile_mode_mask)) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) - level = hwmgr->saved_dpm_level; - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - } - } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = vega10_force_dpm_highest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: ret = vega10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = vega10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -4359,24 +4324,22 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); if (ret) return ret; - hwmgr->dpm_level = level; vega10_force_clock_level(hwmgr, PP_SCLK, 1<dpm_level = level; - break; case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == 
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); - - return 0; + if (!ret) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); + } + return ret; } static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) @@ -4624,7 +4587,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); int i; - if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | AMD_DPM_FORCED_LEVEL_LOW | AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index c649354f08ca9..3bbe7d5cb6de1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -747,6 +747,7 @@ struct pp_hwmgr { enum amd_dpm_forced_level dpm_level; enum amd_dpm_forced_level saved_dpm_level; + enum amd_dpm_forced_level request_dpm_level; bool block_hw_access; struct phm_gfx_arbiter gfx_arbiter; struct phm_acp_arbiter acp_arbiter; @@ -786,12 +787,13 @@ struct pp_hwmgr { struct amd_pp_display_configuration display_config; uint32_t feature_mask; - /* power profile */ + /* UMD Pstate */ struct amd_pp_profile gfx_power_profile; struct amd_pp_profile compute_power_profile; struct amd_pp_profile default_gfx_power_profile; struct amd_pp_profile default_compute_power_profile; enum amd_pp_profile_type current_power_profile; + bool en_umd_pstate; }; extern int hwmgr_early_init(struct pp_instance *handle); From 238e793f4704e794bd3bc04b0e8fadcf0334b495 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 29 Aug 2017 17:07:38 +0800 Subject: [PATCH 085/232] drm/amd/powerplay: set uvd/vce/nb/mclk level as UMD P-state required Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index a301003f6e2a1..7f3b24f42e30a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1138,7 +1138,11 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_ps->action = cz_current_ps->action; - if (!force_high && (cz_ps->action == FORCE_HIGH)) + if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + cz_nbdpm_pstate_enable_disable(hwmgr, false, false); + else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) + cz_nbdpm_pstate_enable_disable(hwmgr, false, true); + else if (!force_high && (cz_ps->action == FORCE_HIGH)) cz_ps->action = CANCEL_FORCE_HIGH; else if (force_high && (cz_ps->action != FORCE_HIGH)) cz_ps->action = FORCE_HIGH; @@ -1374,7 +1378,8 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) if (!bgate) { /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + PHM_PlatformCaps_StablePState) + || hwmgr->en_umd_pstate) { cz_hwmgr->uvd_dpm.hard_min_clk = ptable->entries[ptable->count - 1].vclk; @@ -1403,7 +1408,8 
@@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + PHM_PlatformCaps_StablePState) + || hwmgr->en_umd_pstate) { cz_hwmgr->vce_dpm.hard_min_clk = ptable->entries[ptable->count - 1].ecclk; From a2138eaf97b4e053b229fe07e1bb4ecbe07e6769 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 11 Aug 2017 17:49:48 +0800 Subject: [PATCH 086/232] drm/amdgpu: fix wait_any_fence first is incorrect if hit NULL/signaled fence Signed-off-by: Monk Liu Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b57adc0723cb7..233b6f2f84279 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1384,6 +1384,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, array[i] = fence; } else { /* NULL, the fence has been already signaled */ r = 1; + first = i; goto out; } } From 7a9667ae197460e6c9c3bb432fe68c708fce6259 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 5 Sep 2017 07:30:59 -0400 Subject: [PATCH 087/232] drm/ttm: Fix configuration error around populate_and_map() functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed kbuild errors when IOMMU/SWIOTLB are disabled. Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 6a660d196d877..052e1f102113f 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -920,6 +920,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_pool_unpopulate); +#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) { unsigned i; @@ -960,6 +961,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) ttm_pool_unpopulate(&tt->ttm); } EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); +#endif int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { From a216ab09955d6b77f3af4f0aba9255c5ddf382f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 2 Sep 2017 13:21:31 +0200 Subject: [PATCH 088/232] drm/amdgpu: fix userptr put_page handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move calling put_page into the unpopulate callback. Otherwise we mess up the pages reference count when it is unbound multiple times. 
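The invariant being restored is that every page pinned for a userptr carries exactly one extra reference, taken when the pages are acquired and dropped exactly once when they are finally released; dropping it in a path that can run several times per acquisition underflows the count. A small stand-alone sketch of that imbalance, with hypothetical helpers standing in for get_user_pages()/put_page() rather than the actual TTM code:

    /*
     * Sketch only: a fake refcounted page, with fake_get()/fake_put() standing
     * in for the reference taken by get_user_pages() and dropped by put_page().
     */
    #include <stdio.h>

    struct fake_page { int refcount; };

    static void fake_get(struct fake_page *p) { p->refcount++; }
    static void fake_put(struct fake_page *p) { p->refcount--; }

    int main(void)
    {
            struct fake_page page = { .refcount = 1 };  /* reference held elsewhere */

            fake_get(&page);        /* pinned once, when the user pages are acquired */

            /* Buggy pattern: dropping the pin on every unbind, although the same
             * acquisition is reused across several bind/unbind cycles. */
            for (int unbind = 0; unbind < 2; unbind++)
                    fake_put(&page);

            /* Expected 1 (only the original reference left); actual 0, so the
             * page could be freed while it is still in use. */
            printf("refcount after two unbinds: %d\n", page.refcount);
            return 0;
    }
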
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 +++++++++++++- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5809f55e0d9d7..cc6de0b46326b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1793,6 +1793,7 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 233b6f2f84279..e58db0c69c6a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -474,10 +474,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, /* Check if we have user pages and nobody bound the BO already */ if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) { - size_t size = sizeof(struct page *); - - size *= bo->tbo.ttm->num_pages; - memcpy(bo->tbo.ttm->pages, lobj->user_pages, size); + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, + lobj->user_pages); binding_userptr = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 841a5699bef05..28e1219843329 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -664,6 +664,18 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) return r; } +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) +{ + unsigned i; + + for (i = 0; i < ttm->num_pages; ++i) { + if (ttm->pages[i]) + put_page(ttm->pages[i]); + + ttm->pages[i] = pages ? pages[i] : NULL; + } +} + static void amdgpu_trace_dma_map(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -738,7 +750,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) set_page_dirty(page); mark_page_accessed(page); - put_page(page); } amdgpu_trace_dma_unmap(ttm); @@ -971,6 +982,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { + amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); ttm->page_flags &= ~TTM_PAGE_FLAG_SG; return; From 64d03abe6ee36ec48e997743e9397ae160eb508a Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 5 Sep 2017 07:55:48 -0400 Subject: [PATCH 089/232] drm/amd/powerplay: Fix psm_set_user_performance_state() We now pass a pointer to a pointer which seems to be what they meant in the first place. The previous version was modifying a pointer passed by value. 
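Passing a pointer by value means the callee only ever updates its own local copy of that pointer; to hand a selected state back, the caller's pointer itself has to be passed by address. A minimal, self-contained sketch of the difference, using a simplified hypothetical type rather than the real pp_power_state handling:

    /*
     * Sketch only: 'struct power_state' and the helpers below are simplified
     * stand-ins, not the driver's power-state code.
     */
    #include <stdio.h>

    struct power_state { int id; };

    static struct power_state table[2] = { { .id = 1 }, { .id = 2 } };

    /* Broken: 'state' is a local copy of the caller's pointer, so this
     * assignment is never seen by the caller. */
    static void pick_state_by_value(struct power_state *state)
    {
            state = &table[1];
            (void)state;
    }

    /* Fixed: passing the caller's pointer by address lets the callee
     * change what the caller points at. */
    static void pick_state_by_reference(struct power_state **state)
    {
            *state = &table[1];
    }

    int main(void)
    {
            struct power_state *requested = NULL;

            pick_state_by_value(requested);
            printf("by value: %p\n", (void *)requested);      /* still NULL */

            pick_state_by_reference(&requested);
            printf("by reference: id=%d\n", requested->id);   /* id=2 */
            return 0;
    }

The new struct pp_power_state **state parameter of psm_set_user_performance_state() follows the second pattern.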
Fixes bug that was introduced by commit 332798d40c2e91:drm/amd/powerplay: delete eventmgr layer in poweprlay Signed-off-by: Tom St Denis Reviewed-By: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 4 ++-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 8 ++++---- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index c6157bcdf7d69..4f1b932361b25 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -294,7 +294,7 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, { enum amd_pm_state_type ps; enum PP_StateUILabel requested_ui_label; - struct pp_power_state *requested_ps; + struct pp_power_state *requested_ps = NULL; if (input == NULL) { ret = -EINVAL; @@ -303,7 +303,7 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, ps = *(unsigned long *)input; requested_ui_label = power_state_convert(ps); - ret = psm_set_user_performance_state(hwmgr, requested_ui_label, requested_ps); + ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps); if (ret) return ret; ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 7656324957a8d..167cdc321db28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -188,19 +188,19 @@ int psm_set_performance_states(struct pp_hwmgr *hwmgr) int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, enum PP_StateUILabel label_id, - struct pp_power_state *state) + struct pp_power_state **state) { int table_entries; int i; table_entries = hwmgr->num_ps; - state = hwmgr->ps; + *state = hwmgr->ps; restart_search: for (i = 0; i < table_entries; i++) { - if (state->classification.ui_label & label_id) + if ((*state)->classification.ui_label & label_id) return 0; - state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); + *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size); } switch (label_id) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h index aa44e60ec1b6e..fa1b6825036a1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h @@ -32,7 +32,7 @@ int psm_set_boot_states(struct pp_hwmgr *hwmgr); int psm_set_performance_states(struct pp_hwmgr *hwmgr); int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, enum PP_StateUILabel label_id, - struct pp_power_state *state); + struct pp_power_state **state); int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, struct pp_power_state *new_ps); From aa4ec7ce7ec52c7230cfa73b06d79288b45fe1c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 5 Sep 2017 15:10:50 +0200 Subject: [PATCH 090/232] drm/amdgpu: revert "fix deadlock of reservation between cs and gpu reset v2" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 10e709cb296c98424c03408d23e3addeddcd4088. The patch doesn't work at all: 1. The CS can still be blocked because of amdgpu_ctx_add_fence(). 2. The order of submission isn't correct any more. 3. We could end up using freed up memory because we now drop the ctx reference to early. 
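To make the third point concrete, the failure mode is an ordinary premature final reference drop; the sketch below uses hypothetical names, not the driver's real context objects:

    /*
     * Sketch only: a hypothetical refcounted context, not struct amdgpu_ctx.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { int refcount; };

    static void ctx_get(struct ctx *c) { c->refcount++; }

    static void ctx_put(struct ctx *c)
    {
            if (--c->refcount == 0)
                    free(c);        /* last reference gone: the memory is dead */
    }

    int main(void)
    {
            struct ctx *c = calloc(1, sizeof(*c));

            c->refcount = 1;        /* creator's reference */
            ctx_get(c);             /* reference taken for the submission */

            ctx_put(c);             /* submission path drops its reference... */
            ctx_put(c);             /* ...and the last reference goes too early */

            /* Any later step that still needs 'c' (fence bookkeeping, etc.) would
             * now be reading freed memory, which is the crash being avoided here. */
            return 0;
    }
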
This needs to be fixed cleanly by doing the context handling after the BO handling, but this is a larger task just avoid the obvious crashes for now. Signed-off-by: Christian König Reviewed-by: Monk Liu monk.liu@amd.com Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e58db0c69c6a2..3fe816f6beca2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1148,7 +1148,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); - amdgpu_cs_parser_fini(p, 0, true); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); @@ -1206,10 +1205,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; r = amdgpu_cs_submit(&parser, cs); - if (r) - goto out; - return 0; out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; From 378e2d5b504fe0231c557751e58b80fcf717cc20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Sep 2017 20:58:45 +0200 Subject: [PATCH 091/232] drm/ttm: fix ttm_bo_cleanup_refs_or_queue once more MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With shared reservation objects __ttm_bo_reserve() can easily fail even on destroyed BOs. This prevents correct handling when we need to individualize the reservation object. Fix this by individualizing the object before even trying to reserve it. Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 180ce62964161..bee77d31895b3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; int ret; + ret = ttm_bo_individualize_resv(bo); + if (ret) { + /* Last resort, if we fail to allocate memory for the + * fences block for the BO to become idle + */ + reservation_object_wait_timeout_rcu(bo->resv, true, false, + 30 * HZ); + spin_lock(&glob->lru_lock); + goto error; + } + spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, NULL); - if (!ret) { - if (!ttm_bo_wait(bo, false, true)) { + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); ttm_bo_cleanup_memtype_use(bo); - return; } - ret = ttm_bo_individualize_resv(bo); - if (ret) { - /* Last resort, if we fail to allocate memory for the - * fences block for the BO to become idle and free it. 
- */ - spin_unlock(&glob->lru_lock); - ttm_bo_wait(bo, true, true); - ttm_bo_cleanup_memtype_use(bo); - return; - } ttm_bo_flush_all_fences(bo); /* @@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); } - if (bo->resv != &bo->ttm_resv) - reservation_object_unlock(&bo->ttm_resv); __ttm_bo_unreserve(bo); } + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); +error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); From 862095237c392887819e98018d6dc187d8c78726 Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 7 Sep 2017 13:23:21 +0200 Subject: [PATCH 092/232] drm/amdgpu: Account for shadow PTs in mapping update IB size. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When amdgpu_vm_frag_ptes calls amdgpu_vm_update_ptes and the pt has a shadow PT we mirror all the write to the shadow PT too, which results in twice the commands. Signed-off-by: Bas Nieuwenhuizen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c9223a5184dee..545531db66db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1536,10 +1536,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, nptes = last - start + 1; /* - * reserve space for one command every (1 << BLOCK_SIZE) + * reserve space for two commands every (1 << BLOCK_SIZE) * entries or 2k dwords (whatever is smaller) + * + * The second command is for the shadow pagetables. */ - ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1; + ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2; /* padding, etc. */ ndw = 64; From b72cf4fca2bb786e20864b5e8755105aa9626fb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sun, 3 Sep 2017 15:22:06 +0200 Subject: [PATCH 093/232] drm/amdgpu: move taking mmap_sem into get_user_pages v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This didn't helped as intended, just simplify the code. 
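With the lock moved inside the page-acquisition helper, every exit path of that helper becomes responsible for releasing it, which is what the v2 note below addresses. A rough, purely illustrative sketch of the intended shape, using a pthread rwlock as a stand-in for mmap_sem and a hypothetical helper rather than the exact amdgpu_ttm_tt_get_user_pages():

    /*
     * Sketch only: a pthread rwlock stands in for mm->mmap_sem and
     * acquire_user_pages() is a hypothetical helper, not the amdgpu function.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

    static int acquire_user_pages(int simulate_error)
    {
            int ret = 0;

            pthread_rwlock_rdlock(&fake_mmap_sem);      /* like down_read() */

            if (simulate_error) {
                    ret = -EPERM;                       /* e.g. a VMA check failed */
                    goto out;                           /* the lock must still be dropped */
            }

            /* ... pin the pages here ... */

    out:
            pthread_rwlock_unlock(&fake_mmap_sem);      /* like up_read() */
            return ret;
    }

    int main(void)
    {
            printf("ok path:    %d\n", acquire_user_pages(0));
            printf("error path: %d\n", acquire_user_pages(1));
            /* If the error path skipped the unlock, the lock would stay held and
             * any later writer would block forever. */
            return 0;
    }
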
v2: unlock mmap_sem in the error path as well Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 +----------- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 ---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++++++- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3fe816f6beca2..283a216ee758a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -500,18 +500,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_bo_list_entry *e; struct list_head duplicates; - bool need_mmap_lock = false; unsigned i, tries = 10; int r; INIT_LIST_HEAD(&p->validated); p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); - if (p->bo_list) { - need_mmap_lock = p->bo_list->first_userptr != - p->bo_list->num_entries; + if (p->bo_list) amdgpu_bo_list_get_list(p->bo_list, &p->validated); - } INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); @@ -519,9 +515,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (p->uf_entry.robj) list_add(&p->uf_entry.tv.head, &p->validated); - if (need_mmap_lock) - down_read(¤t->mm->mmap_sem); - while (1) { struct list_head need_pages; unsigned i; @@ -674,9 +667,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, error_free_pages: - if (need_mmap_lock) - up_read(¤t->mm->mmap_sem); - if (p->bo_list) { for (i = p->bo_list->first_userptr; i < p->bo_list->num_entries; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index f1e61b3df6408..b0d45c8e6bb3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -318,8 +318,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, } if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - down_read(¤t->mm->mmap_sem); - r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages); if (r) @@ -334,8 +332,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, amdgpu_bo_unreserve(bo); if (r) goto free_pages; - - up_read(¤t->mm->mmap_sem); } r = drm_gem_handle_create(filp, gobj, &handle); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 28e1219843329..ea0378c8b0497 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -622,6 +622,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) flags |= FOLL_WRITE; + down_read(¤t->mm->mmap_sem); + if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { /* check that we only use anonymous memory to prevent problems with writeback */ @@ -629,8 +631,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) struct vm_area_struct *vma; vma = find_vma(gtt->usermm, gtt->userptr); - if (!vma || vma->vm_file || vma->vm_end < end) + if (!vma || vma->vm_file || vma->vm_end < end) { + up_read(¤t->mm->mmap_sem); return -EPERM; + } } do { @@ -657,10 +661,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) } while (pinned < ttm->num_pages); + up_read(¤t->mm->mmap_sem); return 0; release_pages: release_pages(pages, pinned, 0); + up_read(¤t->mm->mmap_sem); return r; } From ca666a3c298f838346ccea46ff542c605e68deb5 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 5 Sep 2017 14:30:05 +0200 Subject: [PATCH 094/232] drm/amdgpu: stop using BO status for user pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead use a counter to figure out if we need to set new pages or not. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 +++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cc6de0b46326b..f3e561136597d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1802,6 +1802,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, unsigned long end); bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); +bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 283a216ee758a..4d3f8fbfa59de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -473,7 +473,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, return -EPERM; /* Check if we have user pages and nobody bound the BO already */ - if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) { + if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) && + lobj->user_pages) { amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, lobj->user_pages); binding_userptr = true; @@ -534,23 +535,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&need_pages); for (i = p->bo_list->first_userptr; i < p->bo_list->num_entries; ++i) { + struct amdgpu_bo *bo; e = &p->bo_list->array[i]; + bo = e->robj; - if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm, + if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm, &e->user_invalidated) && e->user_pages) { /* We acquired a page array, but somebody * invalidated it. 
Free it and try again */ release_pages(e->user_pages, - e->robj->tbo.ttm->num_pages, + bo->tbo.ttm->num_pages, false); kvfree(e->user_pages); e->user_pages = NULL; } - if (e->robj->tbo.ttm->state != tt_bound && + if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) && !e->user_pages) { list_del(&e->tv.head); list_add(&e->tv.head, &need_pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ea0378c8b0497..e677851910320 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -609,6 +609,7 @@ struct amdgpu_ttm_tt { spinlock_t guptasklock; struct list_head guptasks; atomic_t mmu_invalidations; + uint32_t last_set_pages; struct list_head list; }; @@ -672,8 +673,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) { + struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned i; + gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations); for (i = 0; i < ttm->num_pages; ++i) { if (ttm->pages[i]) put_page(ttm->pages[i]); @@ -1025,6 +1028,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, spin_lock_init(&gtt->guptasklock); INIT_LIST_HEAD(&gtt->guptasks); atomic_set(&gtt->mmu_invalidations, 0); + gtt->last_set_pages = 0; return 0; } @@ -1077,6 +1081,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, return prev_invalidated != *last_invalidated; } +bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL || !gtt->userptr) + return false; + + return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages; +} + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; From 1b0c0f9dc5ca6c0c8be21eeac92c7aa77bbf1d33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 5 Sep 2017 14:36:44 +0200 Subject: [PATCH 095/232] drm/amdgpu: move userptr BOs to CPU domain during CS v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of moving them in the MMU notifier move them during CS.
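To illustrate the counter check added in the previous patch (amdgpu_ttm_tt_userptr_needs_pages), here is a minimal userspace sketch, with made-up names rather than the kernel implementation: the pages count as stale whenever the invalidation counter has moved past the value recorded when they were last set.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct userptr {
	atomic_uint invalidations;	/* bumped by the MMU notifier */
	unsigned int last_set_pages;	/* snapshot taken in set_pages() */
};

static void invalidate(struct userptr *u)
{
	atomic_fetch_add(&u->invalidations, 1);
}

static void set_pages(struct userptr *u)
{
	u->last_set_pages = atomic_load(&u->invalidations);
}

static bool needs_pages(struct userptr *u)
{
	return atomic_load(&u->invalidations) != u->last_set_pages;
}

int main(void)
{
	struct userptr u = { 0 };

	set_pages(&u);
	printf("after set_pages: %d\n", needs_pages(&u));	/* 0 */
	invalidate(&u);
	printf("after invalidate: %d\n", needs_pages(&u));	/* 1 */
	return 0;
}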
v2: still mark pages as accessed/dirty Signed-off-by: Christian König Reviewed-by: Felix Kuehling (v1) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 27 +++++++++++++++++-------- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f3e561136597d..134a049f87bdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1794,6 +1794,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); +void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4d3f8fbfa59de..4877df83b801d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -475,6 +475,12 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, /* Check if we have user pages and nobody bound the BO already */ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) && lobj->user_pages) { + amdgpu_ttm_placement_from_domain(bo, + AMDGPU_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, + false); + if (r) + return r; amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, lobj->user_pages); binding_userptr = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 6558a3ed57a7f..df85a1314799b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -137,10 +137,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); - amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (r) - DRM_ERROR("(%ld) failed to validate user bo\n", r); + amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm); amdgpu_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e677851910320..fe887f361be83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -685,6 +685,24 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) } } +void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned i; + + for (i = 0; i < ttm->num_pages; ++i) { + struct page *page = ttm->pages[i]; + + if (!page) + continue; + + if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) + set_page_dirty(page); + + mark_page_accessed(page); + } +} + static void amdgpu_trace_dma_map(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -740,7 +758,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - struct sg_page_iter sg_iter; int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? 
@@ -753,13 +770,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) /* free the sg table and pages again */ dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) - set_page_dirty(page); - - mark_page_accessed(page); - } + amdgpu_ttm_tt_mark_user_pages(ttm); amdgpu_trace_dma_unmap(ttm); From 60de1c1740f390fe48141b54d04cc53a6073d347 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 5 Sep 2017 14:50:24 +0200 Subject: [PATCH 096/232] drm/amdgpu: use a rw_semaphore for MMU notifiers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow at least some parallel processing. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index df85a1314799b..6d216abd0e1d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -50,7 +50,7 @@ struct amdgpu_mn { struct hlist_node node; /* objects protected by lock */ - struct mutex lock; + struct rw_semaphore lock; struct rb_root objects; }; @@ -74,7 +74,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) struct amdgpu_bo *bo, *next_bo; mutex_lock(&adev->mn_lock); - mutex_lock(&rmn->lock); + down_write(&rmn->lock); hash_del(&rmn->node); rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, it.rb) { @@ -84,7 +84,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) } kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->lock); mutex_unlock(&adev->mn_lock); mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); kfree(rmn); @@ -160,7 +160,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; - mutex_lock(&rmn->lock); + down_read(&rmn->lock); it = interval_tree_iter_first(&rmn->objects, address, address); if (it) { @@ -170,7 +170,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, amdgpu_mn_invalidate_node(node, address, address); } - mutex_unlock(&rmn->lock); + up_read(&rmn->lock); } /** @@ -195,7 +195,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; - mutex_lock(&rmn->lock); + down_read(&rmn->lock); it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { @@ -207,7 +207,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, amdgpu_mn_invalidate_node(node, start, end); } - mutex_unlock(&rmn->lock); + up_read(&rmn->lock); } static const struct mmu_notifier_ops amdgpu_mn_ops = { @@ -248,7 +248,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; - mutex_init(&rmn->lock); + init_rwsem(&rmn->lock); rmn->objects = RB_ROOT; r = __mmu_notifier_register(&rmn->mn, mm); @@ -295,7 +295,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) INIT_LIST_HEAD(&bos); - mutex_lock(&rmn->lock); + down_write(&rmn->lock); while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { kfree(node); @@ -309,7 +309,7 @@ int amdgpu_mn_register(struct 
amdgpu_bo *bo, unsigned long addr) if (!node) { node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); if (!node) { - mutex_unlock(&rmn->lock); + up_write(&rmn->lock); return -ENOMEM; } } @@ -324,7 +324,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) interval_tree_insert(&node->it, &rmn->objects); - mutex_unlock(&rmn->lock); + up_write(&rmn->lock); return 0; } @@ -350,7 +350,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) return; } - mutex_lock(&rmn->lock); + down_write(&rmn->lock); /* save the next list entry for later */ head = bo->mn_list.next; @@ -365,6 +365,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) kfree(node); } - mutex_unlock(&rmn->lock); + up_write(&rmn->lock); mutex_unlock(&adev->mn_lock); } From 3fe89771cb0a65d3b686bcafb5b7e3ebae0ea604 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 12 Sep 2017 14:25:14 -0400 Subject: [PATCH 097/232] drm/amdgpu: stop reserving the BO in the MMU callback v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead take the callback lock during the final parts of CS. This should solve the last remaining locking order problems with BO reservations. v2: rebase, make dummy functions static inline v3: add one more missing inline and comments Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 30 ++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 30 ++++++++++++++++++-------- 3 files changed, 56 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 134a049f87bdd..740683474a1c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -178,6 +178,7 @@ struct amdgpu_cs_parser; struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; +struct amdgpu_mn; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -1057,6 +1058,7 @@ struct amdgpu_cs_parser { /* buffer objects */ struct ww_acquire_ctx ticket; struct amdgpu_bo_list *bo_list; + struct amdgpu_mn *mn; struct amdgpu_bo_list_entry vm_pd; struct list_head validated; struct dma_fence *fence; @@ -1201,9 +1203,18 @@ void amdgpu_test_moves(struct amdgpu_device *adev); * MMU Notifier */ #if defined(CONFIG_MMU_NOTIFIER) +struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev); int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); +void amdgpu_mn_lock(struct amdgpu_mn *mn); +void amdgpu_mn_unlock(struct amdgpu_mn *mn); #else +static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} +static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} +static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) +{ + return NULL; +} static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) { return -ENODEV; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4877df83b801d..c2310d4eebc8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -513,8 +513,11 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&p->validated); p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); - if (p->bo_list) + if (p->bo_list) { amdgpu_bo_list_get_list(p->bo_list, &p->validated); + if (p->bo_list->first_userptr != p->bo_list->num_entries) + p->mn = 
amdgpu_mn_get(p->adev); + } INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); @@ -722,11 +725,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (!error) - ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - parser->fence); - else if (backoff) + if (error && backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); @@ -1127,14 +1126,29 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct amdgpu_ring *ring = p->job->ring; struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; + unsigned i; int r; + amdgpu_mn_lock(p->mn); + if (p->bo_list) { + for (i = p->bo_list->first_userptr; + i < p->bo_list->num_entries; ++i) { + struct amdgpu_bo *bo = p->bo_list->array[i].robj; + + if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { + amdgpu_mn_unlock(p->mn); + return -ERESTARTSYS; + } + } + } + job = p->job; p->job = NULL; r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); if (r) { amdgpu_job_free(job); + amdgpu_mn_unlock(p->mn); return r; } @@ -1150,6 +1164,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); + + ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); + amdgpu_mn_unlock(p->mn); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 6d216abd0e1d7..99edb40b5f998 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -106,6 +106,25 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, schedule_work(&rmn->work); } + +/** + * amdgpu_mn_lock - take the write side lock for this mn + */ +void amdgpu_mn_lock(struct amdgpu_mn *mn) +{ + if (mn) + down_write(&mn->lock); +} + +/** + * amdgpu_mn_unlock - drop the write side lock for this mn + */ +void amdgpu_mn_unlock(struct amdgpu_mn *mn) +{ + if (mn) + up_write(&mn->lock); +} + /** * amdgpu_mn_invalidate_node - unmap all BOs of a node * @@ -126,20 +145,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) continue; - r = amdgpu_bo_reserve(bo, true); - if (r) { - DRM_ERROR("(%ld) failed to reserve user bo\n", r); - continue; - } - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm); - - amdgpu_bo_unreserve(bo); } } @@ -223,7 +234,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = { * * Creates a notifier context for current->mm. */ -static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) +struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) { struct mm_struct *mm = current->mm; struct amdgpu_mn *rmn; @@ -368,3 +379,4 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) up_write(&rmn->lock); mutex_unlock(&adev->mn_lock); } + From f5683f8b3794f9496877fb9e20305433c6575b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 6 Sep 2017 16:55:55 +0200 Subject: [PATCH 098/232] drm/ttm: allow mapping BOs while they are still on the swap list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With shared reservation objects it is possible that we want to temporary kmap an BO while it is still on the swap list. 
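For the submit-side change above, where amdgpu_cs_submit returns -ERESTARTSYS when the user pages went stale, a rough userspace sketch of the take-lock, re-check, restart pattern; notifier_lock, invalidations and submit are made-up stand-ins, not the kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t notifier_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint invalidations;
static unsigned int pages_generation;

/* Stand-in for the MMU notifier invalidating the user pages. */
static void invalidate(void)
{
	atomic_fetch_add(&invalidations, 1);
}

/* Stand-in for (re)acquiring the user pages. */
static void refresh_pages(void)
{
	pages_generation = atomic_load(&invalidations);
}

/* Push the job only while holding the notifier lock and only if the
 * pages are still current; otherwise ask the caller to restart. */
static int submit(void)
{
	int ret = 0;

	pthread_mutex_lock(&notifier_lock);
	if (atomic_load(&invalidations) != pages_generation)
		ret = -1;	/* restart, like -ERESTARTSYS above */
	else
		printf("job pushed\n");
	pthread_mutex_unlock(&notifier_lock);
	return ret;
}

int main(void)
{
	refresh_pages();
	printf("first submit: %d\n", submit());
	invalidate();
	printf("stale submit: %d\n", submit());
	refresh_pages();
	printf("after refresh: %d\n", submit());
	return 0;
}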
Signed-off-by: Christian König Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c934ad5b39036..78cb99be71462 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -587,7 +587,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long offset, size; int ret; - BUG_ON(!list_empty(&bo->swap)); map->virtual = NULL; map->bo = bo; if (num_pages > bo->num_pages) From 9cca0b8e5df0ac438c65eec5044bfa089d16fbbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 6 Sep 2017 16:15:28 +0200 Subject: [PATCH 099/232] drm/amdgpu: move amdgpu_cs_sysvm_access_required into find_mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we need to find the mapping we need sysvm access anyway. Signed-off-by: Christian König Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 64 +++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 16 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 11 ++--- 4 files changed, 36 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 740683474a1c4..5ee6cea8caaf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -179,6 +179,7 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_mn; +struct amdgpu_bo_va_mapping; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -1900,10 +1901,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } #endif -struct amdgpu_bo_va_mapping * -amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, - uint64_t addr, struct amdgpu_bo **bo); -int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser); +int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, + uint64_t addr, struct amdgpu_bo **bo, + struct amdgpu_bo_va_mapping **mapping); #include "amdgpu_object.h" #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c2310d4eebc8b..c30110a3024ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -921,11 +921,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, uint64_t offset; uint8_t *kptr; - m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, - &aobj); - if (!aobj) { + r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, + &aobj, &m); + if (r) { DRM_ERROR("IB va_start is invalid\n"); - return -EINVAL; + return r; } if ((chunk_ib->va_start + chunk_ib->ib_bytes) > @@ -1475,15 +1475,16 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, * virtual memory address. Returns allocation structure when found, NULL * otherwise. 
*/ -struct amdgpu_bo_va_mapping * -amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, - uint64_t addr, struct amdgpu_bo **bo) +int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, + uint64_t addr, struct amdgpu_bo **bo, + struct amdgpu_bo_va_mapping **map) { struct amdgpu_bo_va_mapping *mapping; unsigned i; + int r; if (!parser->bo_list) - return NULL; + return 0; addr /= AMDGPU_GPU_PAGE_SIZE; @@ -1500,7 +1501,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, continue; *bo = lobj->bo_va->base.bo; - return mapping; + *map = mapping; + goto found; } list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { @@ -1509,44 +1511,22 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, continue; *bo = lobj->bo_va->base.bo; - return mapping; + *map = mapping; + goto found; } } - return NULL; -} + return -EINVAL; -/** - * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM - * - * @parser: command submission parser context - * - * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM. - */ -int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser) -{ - unsigned i; - int r; +found: + r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem); + if (unlikely(r)) + return r; - if (!parser->bo_list) + if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) return 0; - for (i = 0; i < parser->bo_list->num_entries; i++) { - struct amdgpu_bo *bo = parser->bo_list->array[i].robj; - - r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); - if (unlikely(r)) - return r; - - if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) - continue; - - bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains); - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (unlikely(r)) - return r; - } - - return 0; + (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains); + return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e19928dae8e3f..331e34ac61fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -410,10 +410,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx); int r = 0; - mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo); - if (mapping == NULL) { + r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping); + if (r) { DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); - return -EINVAL; + return r; } if (!ctx->parser->adev->uvd.address_64_bit) { @@ -737,10 +737,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx); int r; - mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo); - if (mapping == NULL) { + r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping); + if (r) { DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); - return -EINVAL; + return r; } start = amdgpu_bo_gpu_offset(bo); @@ -917,10 +917,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } - r = amdgpu_cs_sysvm_access_required(parser); - if (r) - return r; - ctx.parser = parser; ctx.buf_sizes = buf_sizes; ctx.ib_idx = ib_idx; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index c855366521abc..b46280c1279f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo; uint64_t addr; + int r; if (index == 0xffffffff) index = 0; @@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32; addr += ((uint64_t)size) * ((uint64_t)index); - mapping = amdgpu_cs_find_mapping(p, addr, &bo); - if (mapping == NULL) { + r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping); + if (r) { DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n", addr, lo, hi, size, index); - return -EINVAL; + return r; } if ((addr + (uint64_t)size) > @@ -652,10 +653,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) p->job->vm = NULL; ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); - r = amdgpu_cs_sysvm_access_required(p); - if (r) - return r; - while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); From aebc5e6f50f770ec9392c3ca804f18b30797dfa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 6 Sep 2017 16:55:16 +0200 Subject: [PATCH 100/232] drm/amdgpu: rework amdgpu_cs_find_mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the VM instead of the BO list to find the BO for a virtual address. This fixes UVD/VCE in physical mode with VM local BOs. Signed-off-by: Christian König Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 42 ++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 4 files changed, 30 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c30110a3024ae..5f19227b35e93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1479,46 +1479,24 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **map) { + struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va_mapping *mapping; - unsigned i; int r; - if (!parser->bo_list) - return 0; - addr /= AMDGPU_GPU_PAGE_SIZE; - for (i = 0; i < parser->bo_list->num_entries; i++) { - struct amdgpu_bo_list_entry *lobj; - - lobj = &parser->bo_list->array[i]; - if (!lobj->bo_va) - continue; - - list_for_each_entry(mapping, &lobj->bo_va->valids, list) { - if (mapping->start > addr || - addr > mapping->last) - continue; - - *bo = lobj->bo_va->base.bo; - *map = mapping; - goto found; - } - - list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { - if (mapping->start > addr || - addr > mapping->last) - continue; + mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); + if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) + return -EINVAL; - *bo = lobj->bo_va->base.bo; - *map = mapping; - goto found; - } - } + *bo = mapping->bo_va->base.bo; + *map = mapping; - return -EINVAL; + /* Double check that the BO is reserved by this CS */ + if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket) + return -EINVAL; -found: r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem); if (unlikely(r)) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 42492e63b3a29..a4891bea2ca8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -35,6 +35,7 @@ /* bo virtual addresses in a vm */ struct amdgpu_bo_va_mapping { + struct amdgpu_bo_va *bo_va; struct list_head list; struct rb_node rb; uint64_t start; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 545531db66db9..758bbb9e77f3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2086,6 +2086,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; + mapping->bo_va = bo_va; list_add(&mapping->list, &bo_va->invalids); amdgpu_vm_it_insert(mapping, &vm->va); @@ -2263,6 +2264,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); + mapping->bo_va = NULL; trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) @@ -2348,6 +2350,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, if (tmp->last > eaddr) tmp->last = eaddr; + tmp->bo_va = NULL; list_add(&tmp->list, &vm->freed); trace_amdgpu_vm_bo_unmap(NULL, tmp); } @@ -2373,6 +2376,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, return 0; } +/** + * amdgpu_vm_bo_lookup_mapping - find mapping by address + * + * @vm: the requested VM + * + * Find a mapping by it's address. + */ +struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, + uint64_t addr) +{ + return amdgpu_vm_it_iter_first(&vm->va, addr, addr); +} + /** * amdgpu_vm_bo_rmv - remove a bo to a specific vm * @@ -2398,6 +2414,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); + mapping->bo_va = NULL; trace_amdgpu_vm_bo_unmap(bo_va, mapping); list_add(&mapping->list, &vm->freed); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 90b7741d024b8..c1accd15efc80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -276,6 +276,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); +struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, + uint64_t addr); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, From 1ed3d2567c800eca053ef86fdd3fc27b72d0192e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 5 Sep 2017 17:30:46 +0200 Subject: [PATCH 101/232] drm/amdgpu: keep the MMU lock until the update ends v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is quite controversial because it adds another lock which is held during page table updates, but I don't see much other option. 
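A rough userspace sketch of the read-side scheme used below (not the kernel code; the rwlock, mutex and counter are made-up stand-ins for rmn->lock, rmn->read_lock and rmn->recursion): the shared lock is taken only by the outermost invalidate_range_start() and dropped again by the matching invalidate_range_end().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t read_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int recursion;

/* Take the shared lock only on the outermost range_start(). */
static void range_start(void)
{
	pthread_mutex_lock(&read_lock);
	if (atomic_fetch_add(&recursion, 1) == 0)
		pthread_rwlock_rdlock(&lock);
	pthread_mutex_unlock(&read_lock);
}

/* Drop it again when the matching range_end() arrives. */
static void range_end(void)
{
	if (atomic_fetch_sub(&recursion, 1) == 1)
		pthread_rwlock_unlock(&lock);
}

int main(void)
{
	range_start();
	range_start();	/* nested invalidation: the lock is taken once */
	range_end();
	range_end();
	printf("recursion back to %d\n", atomic_load(&recursion));
	return 0;
}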
v2: allow multiple updates to be in flight at the same time v3: simplify the patch, take the read side only once v4: correctly fix rebase conflict Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 59 ++++++++++++++++++++++++-- 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5ee6cea8caaf8..0758d9176ea12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1204,11 +1204,11 @@ void amdgpu_test_moves(struct amdgpu_device *adev); * MMU Notifier */ #if defined(CONFIG_MMU_NOTIFIER) +void amdgpu_mn_lock(struct amdgpu_mn *mn); +void amdgpu_mn_unlock(struct amdgpu_mn *mn); struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev); int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); -void amdgpu_mn_lock(struct amdgpu_mn *mn); -void amdgpu_mn_unlock(struct amdgpu_mn *mn); #else static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 99edb40b5f998..521a51b37f5d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -52,6 +52,8 @@ struct amdgpu_mn { /* objects protected by lock */ struct rw_semaphore lock; struct rb_root objects; + struct mutex read_lock; + atomic_t recursion; }; struct amdgpu_mn_node { @@ -125,6 +127,34 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn) up_write(&mn->lock); } +/** + * amdgpu_mn_read_lock - take the rmn read lock + * + * @rmn: our notifier + * + * Take the rmn read side lock. + */ +static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn) +{ + mutex_lock(&rmn->read_lock); + if (atomic_inc_return(&rmn->recursion) == 1) + down_read_non_owner(&rmn->lock); + mutex_unlock(&rmn->read_lock); +} + +/** + * amdgpu_mn_read_unlock - drop the rmn read lock + * + * @rmn: our notifier + * + * Drop the rmn read side lock. 
+ */ +static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn) +{ + if (atomic_dec_return(&rmn->recursion) == 0) + up_read_non_owner(&rmn->lock); +} + /** * amdgpu_mn_invalidate_node - unmap all BOs of a node * @@ -171,7 +201,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; - down_read(&rmn->lock); + amdgpu_mn_read_lock(rmn); it = interval_tree_iter_first(&rmn->objects, address, address); if (it) { @@ -181,7 +211,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, amdgpu_mn_invalidate_node(node, address, address); } - up_read(&rmn->lock); + amdgpu_mn_read_unlock(rmn); } /** @@ -206,7 +236,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; - down_read(&rmn->lock); + amdgpu_mn_read_lock(rmn); it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { @@ -217,14 +247,33 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, amdgpu_mn_invalidate_node(node, start, end); } +} + +/** + * amdgpu_mn_invalidate_range_end - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @start: start of updated range + * @end: end of updated range + * + * Release the lock again to allow new command submissions. + */ +static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); - up_read(&rmn->lock); + amdgpu_mn_read_unlock(rmn); } static const struct mmu_notifier_ops amdgpu_mn_ops = { .release = amdgpu_mn_release, .invalidate_page = amdgpu_mn_invalidate_page, .invalidate_range_start = amdgpu_mn_invalidate_range_start, + .invalidate_range_end = amdgpu_mn_invalidate_range_end, }; /** @@ -261,6 +310,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->mn.ops = &amdgpu_mn_ops; init_rwsem(&rmn->lock); rmn->objects = RB_ROOT; + mutex_init(&rmn->read_lock); + atomic_set(&rmn->recursion, 0); r = __mmu_notifier_register(&rmn->mn, mm); if (r) From 711becf0e6c5bf7cabd8f1ca528e9bbe085fda9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 8 Sep 2017 17:19:19 +0200 Subject: [PATCH 102/232] drm/amdgpu: move amdgpu_ttm_tt_* declarations into amdgpu_ttm.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just some cleanup. 
Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 15 --------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0758d9176ea12..814df1a212c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1804,21 +1804,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); -int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); -void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm); -int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, - uint32_t flags); -bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); -struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); -bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end); -bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, - int *last_invalidated); -bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm); -bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); -uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 43093bffa2cfa..64709e041d5b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -82,4 +82,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); +int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); +void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm); +int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags); +bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end); +bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, + int *last_invalidated); +bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); +uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, + struct ttm_mem_reg *mem); + #endif From 9a18999640fa6aed0578c59c328dca9ca01a2d9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 12 Sep 2017 14:29:07 -0400 Subject: [PATCH 103/232] drm/amdgpu: move MMU notifier related defines to amdgpu_mn.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just some cleanup. 
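The useful part of the new header is the pattern it keeps (see the diff that follows): when the kernel config option is off, callers still compile because static inline stubs with the same signatures are provided. A minimal sketch of that pattern, using a made-up HAVE_NOTIFIER switch standing in for CONFIG_MMU_NOTIFIER:

#include <stdio.h>

struct ctx;

#ifdef HAVE_NOTIFIER
int notifier_register(struct ctx *c);
void notifier_unregister(struct ctx *c);
#else
/* Built without the feature: no-op stubs keep every caller compiling. */
static inline int notifier_register(struct ctx *c)
{
	(void)c;
	return -1;	/* mirrors the -ENODEV style of the real stubs */
}

static inline void notifier_unregister(struct ctx *c)
{
	(void)c;
}
#endif

int main(void)
{
	printf("register: %d\n", notifier_register(NULL));
	notifier_unregister(NULL);
	return 0;
}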
Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 25 +------------ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h | 52 ++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 24 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 814df1a212c48..802fdc11944b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -65,6 +65,7 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "amdgpu_vcn.h" +#include "amdgpu_mn.h" #include "gpu_scheduler.h" #include "amdgpu_virt.h" @@ -178,7 +179,6 @@ struct amdgpu_cs_parser; struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; -struct amdgpu_mn; struct amdgpu_bo_va_mapping; enum amdgpu_cp_irq { @@ -1200,29 +1200,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); */ void amdgpu_test_moves(struct amdgpu_device *adev); -/* - * MMU Notifier - */ -#if defined(CONFIG_MMU_NOTIFIER) -void amdgpu_mn_lock(struct amdgpu_mn *mn); -void amdgpu_mn_unlock(struct amdgpu_mn *mn); -struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev); -int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); -void amdgpu_mn_unregister(struct amdgpu_bo *bo); -#else -static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} -static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} -static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) -{ - return NULL; -} -static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) -{ - return -ENODEV; -} -static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} -#endif - /* * Debugfs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h new file mode 100644 index 0000000000000..d0095a3793b8e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -0,0 +1,52 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ +#ifndef __AMDGPU_MN_H__ +#define __AMDGPU_MN_H__ + +/* + * MMU Notifier + */ +struct amdgpu_mn; + +#if defined(CONFIG_MMU_NOTIFIER) +void amdgpu_mn_lock(struct amdgpu_mn *mn); +void amdgpu_mn_unlock(struct amdgpu_mn *mn); +struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev); +int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); +void amdgpu_mn_unregister(struct amdgpu_bo *bo); +#else +static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} +static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} +static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) +{ + return NULL; +} +static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) +{ + return -ENODEV; +} +static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} +#endif + +#endif From f1ac0fc2f2355495628a45e90cbd88e3d2c40ef9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 4 Sep 2017 17:42:28 +0800 Subject: [PATCH 104/232] drm/amdgpu: fixed raven psp cmd prepare and submit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - fw_size in psp_v10_0_prep_cmd_buf is wrongly set as 0 - fixed the wrong calculation of psp_write_ptr_reg in psp_v10_0_cmd_submit Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index f7cf994b1da28..b77feef829e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -136,15 +136,13 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm { int ret; uint64_t fw_mem_mc_addr = ucode->mc_addr; - struct common_firmware_header *header; memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); - header = (struct common_firmware_header *)ucode->fw; cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes); + cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); if (ret) @@ -245,15 +243,20 @@ int psp_v10_0_cmd_submit(struct psp_context *psp, struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; + uint32_t ring_size_dw = ring->ring_size / 4; + uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; /* KM (GPCOM) prepare write pointer */ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); /* Update KM RB frame pointer to new frame */ - if ((psp_write_ptr_reg % ring->ring_size) == 0) + if ((psp_write_ptr_reg % ring_size_dw) == 0) write_frame = ring->ring_mem; else - write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4)); + write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw); + + /* Initialize KM RB frame */ + memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); /* Update KM RB frame */ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); @@ -263,8 +266,7 @@ int psp_v10_0_cmd_submit(struct psp_context *psp, write_frame->fence_value = index; /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg += 
sizeof(struct psp_gfx_rb_frame) / 4; - psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg; + psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); return 0; From 4ef72453311a697b3fb90da9c86c83012911ccf9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 8 Sep 2017 13:04:52 +0800 Subject: [PATCH 105/232] drm/amdgpu: added api for stopping psp ring (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - v2: reuse the ring stop api in ring destory Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2 ++ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 15 ++++++++++++++- drivers/gpu/drm/amd/amdgpu/psp_v10_0.h | 2 ++ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 15 ++++++++++++++- drivers/gpu/drm/amd/amdgpu/psp_v3_1.h | 2 ++ 6 files changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8c2204c7b3847..abd20819ba6bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -57,6 +57,7 @@ static int psp_sw_init(void *handle) psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf; psp->ring_init = psp_v3_1_ring_init; psp->ring_create = psp_v3_1_ring_create; + psp->ring_stop = psp_v3_1_ring_stop; psp->ring_destroy = psp_v3_1_ring_destroy; psp->cmd_submit = psp_v3_1_cmd_submit; psp->compare_sram_data = psp_v3_1_compare_sram_data; @@ -69,6 +70,7 @@ static int psp_sw_init(void *handle) psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->ring_init = psp_v10_0_ring_init; psp->ring_create = psp_v10_0_ring_create; + psp->ring_stop = psp_v10_0_ring_stop; psp->ring_destroy = psp_v10_0_ring_destroy; psp->cmd_submit = psp_v10_0_cmd_submit; psp->compare_sram_data = psp_v10_0_compare_sram_data; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 538fa9dbfb212..e79795b59797d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -66,6 +66,8 @@ struct psp_context struct psp_gfx_cmd_resp *cmd); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); + int (*ring_stop)(struct psp_context *psp, + enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index b77feef829e7b..6ec5c9f8074d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -207,7 +207,7 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) return ret; } -int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) +int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; struct psp_ring *ring; @@ -227,6 +227,19 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), 0x80000000, 0x80000000, false); + return ret; +} + +int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) 
+{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v10_0_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + amdgpu_bo_free_kernel(&adev->firmware.rbuf, &ring->ring_mem_mc_addr, (void **)&ring->ring_mem); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h index e76cde2f01f95..3af3ad1320ff8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h @@ -34,6 +34,8 @@ extern int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type); extern int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type); +extern int psp_v10_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type); extern int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type); extern int psp_v10_0_cmd_submit(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 2a535a4b8d5b3..bcbe30dfff39f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -319,7 +319,7 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) return ret; } -int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) +int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; struct psp_ring *ring; @@ -339,6 +339,19 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), 0x80000000, 0x80000000, false); + return ret; +} + +int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v3_1_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + amdgpu_bo_free_kernel(&adev->firmware.rbuf, &ring->ring_mem_mc_addr, (void **)&ring->ring_mem); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h index 9dcd0b25c4c60..5af2231b7099d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h @@ -41,6 +41,8 @@ extern int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type); extern int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type); +extern int psp_v3_1_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type); extern int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type); extern int psp_v3_1_cmd_submit(struct psp_context *psp, From bcd6eab837fc9db67292c1d071ce2d96bb9689be Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 8 Sep 2017 13:09:50 +0800 Subject: [PATCH 106/232] drm/amdgpu: stop psp ring on suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise, the ring will fail to create on next resume. 
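A toy userspace sketch of why the stop is needed (ring_created, ring_create and ring_stop are made-up stand-ins, not the psp_v10_0_* functions): creation refuses to run twice, so the resume path only succeeds if suspend tore the ring down first.

#include <stdbool.h>
#include <stdio.h>

static bool ring_created;

static int ring_create(void)
{
	if (ring_created)
		return -1;	/* this is what resume used to hit */
	ring_created = true;
	return 0;
}

static void ring_stop(void)
{
	ring_created = false;
}

static int suspend(void)
{
	ring_stop();	/* the fix: stop the ring before powering down */
	return 0;
}

int main(void)
{
	printf("boot create: %d\n", ring_create());
	suspend();
	printf("resume create: %d\n", ring_create());
	return 0;
}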
Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index abd20819ba6bd..0d530524ab215 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -455,6 +455,16 @@ static int psp_hw_fini(void *handle) static int psp_suspend(void *handle) { + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; + + ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); + if (ret) { + DRM_ERROR("PSP ring stop failed\n"); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index e79795b59797d..1b7d12d88720f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -125,6 +125,7 @@ struct amdgpu_psp_funcs { #define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) +#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type)) #define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) From 50811c71fa1b7c2f8b422ad532d75a6860f15a8b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 4 Sep 2017 17:48:27 +0800 Subject: [PATCH 107/232] drm/amdgpu: enable raven to load firmwares by psp at default (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - v2: share code with CHIP_VEGA10 case Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 6 +----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0d530524ab215..84bd6ed7a641c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -64,9 +64,7 @@ static int psp_sw_init(void *handle) psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; break; case CHIP_RAVEN: -#if 0 psp->init_microcode = psp_v10_0_init_microcode; -#endif psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->ring_init = psp_v10_0_ring_init; psp->ring_create = psp_v10_0_ring_create; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 36c763310df5f..f306374ff6547 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) else return AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: - if (!load_type) - return AMDGPU_FW_LOAD_DIRECT; - else - return AMDGPU_FW_LOAD_PSP; case CHIP_RAVEN: - if (load_type != 2) + if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; From d5884513a31df072879c89c80306d544467ee770 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 8 Sep 2017 14:09:41 +0200 Subject: [PATCH 108/232] drm/amdgpu: fix VM sync with always 
valid BOs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All users of a VM must always wait for updates with always valid BOs to be completed. v2: remove debugging leftovers, rename struct member Signed-off-by: Christian König Reviewed-by: Roger He Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 ++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5f19227b35e93..ff61073b7181f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -761,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update); - if (r) - return r; - r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; @@ -819,6 +815,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); + if (r) + return r; + + r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update); + if (r) + return r; if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 758bbb9e77f3a..64baa31389657 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1141,9 +1141,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, goto error_free; amdgpu_bo_fence(parent->base.bo, fence, true); - dma_fence_put(vm->last_dir_update); - vm->last_dir_update = dma_fence_get(fence); - dma_fence_put(fence); + dma_fence_put(vm->last_update); + vm->last_update = fence; } } @@ -1804,6 +1803,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, trace_amdgpu_vm_bo_mapping(mapping); } + if (bo_va->base.bo && + bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) { + dma_fence_put(vm->last_update); + vm->last_update = dma_fence_get(bo_va->last_pt_update); + } + return 0; } @@ -2587,7 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), "CPU update of VM recommended only for large BAR system\n"); - vm->last_dir_update = NULL; + vm->last_update = NULL; flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CLEARED; @@ -2693,7 +2698,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } amdgpu_vm_free_levels(&vm->root); - dma_fence_put(vm->last_dir_update); + dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vm_free_reserved_vmid(adev, vm, i); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index c1accd15efc80..cb6a6222fc3f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -140,7 +140,7 @@ struct amdgpu_vm { /* contains the page directory */ struct amdgpu_vm_pt root; - struct dma_fence *last_dir_update; + struct dma_fence *last_update; /* protecting freed */ spinlock_t freed_lock; From 9a5487ef56840cbebbb2e57c849bc1cc93c28002 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 5 Sep 2017 12:02:57 -0400 Subject: [PATCH 109/232] drm/amd/powerplay: Simplify vega10_patch_voltage_dependency_tables_with_lookup_table() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 75 ++++++------------- 1 file changed, 23 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 2e776edf9b8d5..8fb1dccaa4b8b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -591,61 +591,37 @@ static int vega10_patch_clock_voltage_limits_with_vddc_leakage( static int vega10_patch_voltage_dependency_tables_with_lookup_table( struct pp_hwmgr *hwmgr) { - uint8_t entry_id; - uint8_t voltage_id; + uint8_t entry_id, voltage_id; + unsigned i; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = - table_info->vdd_dep_on_socclk; - struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table = - table_info->vdd_dep_on_dcefclk; - struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table = - table_info->vdd_dep_on_pixclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table = - table_info->vdd_dep_on_dispclk; - struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table = - table_info->vdd_dep_on_phyclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; - for (entry_id = 0; entry_id < socclk_table->count; entry_id++) { - voltage_id = socclk_table->entries[entry_id].vddInd; - socclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) { - voltage_id = gfxclk_table->entries[entry_id].vddInd; - gfxclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) { - voltage_id = dcefclk_table->entries[entry_id].vddInd; - 
dcefclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } - - for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) { - voltage_id = pixclk_table->entries[entry_id].vddInd; - pixclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } + for (i = 0; i < 6; i++) { + struct phm_ppt_v1_clock_voltage_dependency_table *vdt; + switch (i) { + case 0: vdt = table_info->vdd_dep_on_socclk; break; + case 1: vdt = table_info->vdd_dep_on_sclk; break; + case 2: vdt = table_info->vdd_dep_on_dcefclk; break; + case 3: vdt = table_info->vdd_dep_on_pixclk; break; + case 4: vdt = table_info->vdd_dep_on_dispclk; break; + case 5: vdt = table_info->vdd_dep_on_phyclk; break; + } - for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) { - voltage_id = dspclk_table->entries[entry_id].vddInd; - dspclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + for (entry_id = 0; entry_id < vdt->count; entry_id++) { + voltage_id = vdt->entries[entry_id].vddInd; + vdt->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } } - for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) { - voltage_id = phyclk_table->entries[entry_id].vddInd; - phyclk_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; } for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { @@ -660,11 +636,6 @@ static int vega10_patch_voltage_dependency_tables_with_lookup_table( table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; } - for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { - voltage_id = mm_table->entries[entry_id].vddcInd; - mm_table->entries[entry_id].vddc = - table_info->vddc_lookup_table->entries[voltage_id].us_vdd; - } return 0; From 06474d5665729a3275d04212277f17792199b882 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 5 Sep 2017 12:33:58 -0400 Subject: [PATCH 110/232] drm/amd/powerplay: Simplify vega10_acg_disable() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 8fb1dccaa4b8b..d56e29a0c40b6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2313,13 +2313,11 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (data->smu_features[GNLD_ACG].supported) { - if (data->smu_features[GNLD_ACG].enabled) { - if (0 == vega10_enable_smc_features(hwmgr->smumgr, false, - data->smu_features[GNLD_ACG].smu_feature_bitmap)) + if (data->smu_features[GNLD_ACG].supported && + data->smu_features[GNLD_ACG].enabled) + if (!vega10_enable_smc_features(hwmgr->smumgr, false, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = false; - } - } return 0; } From 38e40d9cc428eed0795fa5345357f50da4ec0f21 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 6 Sep 2017 08:04:10 -0400 Subject: [PATCH 111/232] drm/amd/powerplay: Introduction of bitmask 
macros for registers Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/cgs_common.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 0214f63f52fc6..92eaa81f9fcba 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -310,6 +310,22 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space, unsigned index, uint32_t value); +#define CGS_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT +#define CGS_REG_FIELD_MASK(reg, field) reg##__##field##_MASK + +#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val) \ + (((orig_val) & ~CGS_REG_FIELD_MASK(reg, field)) | \ + (CGS_REG_FIELD_MASK(reg, field) & ((field_val) << CGS_REG_FIELD_SHIFT(reg, field)))) + +#define CGS_REG_GET_FIELD(value, reg, field) \ + (((value) & CGS_REG_FIELD_MASK(reg, field)) >> CGS_REG_FIELD_SHIFT(reg, field)) + +#define CGS_WREG32_FIELD(device, reg, field, val) \ + cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field)) + +#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \ + cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field)) + /** * cgs_get_pci_resource() - provide access to a device resource (PCI BAR) * @cgs_device: opaque device handle From 7246187abf965d14d462faef74920b3dd16cb5e3 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 6 Sep 2017 08:04:41 -0400 Subject: [PATCH 112/232] drm/amd/powerplay: Port vega10_didt_set_mask() to new macros Start using new CGS bitmask macros. 
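As a quick illustration of what the new CGS field helpers buy, the sketch below contrasts the open-coded read-modify-write pattern with the macro form. It is not code taken from either patch: FOO_CTRL, its ENABLE field, and the mmFOO_CTRL / FOO_CTRL__ENABLE__SHIFT / FOO_CTRL__ENABLE_MASK definitions are invented stand-ins that merely follow the <REG>__<FIELD> naming convention the macros rely on; only the cgs_*_register() calls and the CGS_* macros themselves come from the patch above.

/* Hypothetical register and field, named the way the macros expect:
 * <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK must both exist.
 */
#define mmFOO_CTRL              0x1234
#define FOO_CTRL__ENABLE__SHIFT 0x4
#define FOO_CTRL__ENABLE_MASK   0x00000010L

static void example_set_enable(struct cgs_device *device, uint32_t en)
{
	uint32_t data;

	/* Open-coded read-modify-write of a single field: */
	data  = cgs_read_register(device, mmFOO_CTRL);
	data &= ~FOO_CTRL__ENABLE_MASK;
	data |= (en << FOO_CTRL__ENABLE__SHIFT) & FOO_CTRL__ENABLE_MASK;
	cgs_write_register(device, mmFOO_CTRL, data);

	/* The same update as one statement with the new macro: */
	CGS_WREG32_FIELD(device, FOO_CTRL, ENABLE, en);

	/* Reading the field back out again: */
	data = CGS_REG_GET_FIELD(cgs_read_register(device, mmFOO_CTRL),
				 FOO_CTRL, ENABLE);
}

The DIDT registers touched below are indirect, so vega10_didt_set_mask() uses the CGS_WREG32_FIELD_IND and CGS_REG_SET_FIELD variants, but the collapse is the same: four mask-and-shift lines per register become a single call that keeps the register and field names together.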
Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../amd/powerplay/hwmgr/vega10_powertune.c | 60 +++++++------------ 1 file changed, 20 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index e7fa67063cdcb..c5ff94f4fd0ff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -855,91 +855,71 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_SQ_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~SQ_Enable_MASK; didt_block_info |= en << SQ_Enable_SHIFT; } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DB_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~DB_Enable_MASK; didt_block_info |= en << DB_Enable_SHIFT; } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TD_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TD_Enable_MASK; didt_block_info |= en << TD_Enable_SHIFT; } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TCP_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TCP_Enable_MASK; didt_block_info |= en << TCP_Enable_SHIFT; } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0); - data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DBR_CTRL0, DIDT_CTRL_EN, en); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) { if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); - data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); - data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); - data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); - data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); - data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK; - data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK); - data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK; - data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK); + data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); + data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); } } From 583a888a77bcc0e25e29f958a3a77c133306dc73 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 07:43:33 -0400 Subject: [PATCH 113/232] drm/amd/powerplay: Add PP_CAP() macro To replace common lengthy 
sequence that would create really long lines all over. Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index a4c8b09b6f146..03adece4efea6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -283,6 +283,8 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); } +#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c)) + #define PP_PCIEGenInvalid 0xffff enum PP_PCIEGen { PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */ From dd5a6fe2af0366803ddccf27981b7dd48e21099c Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 07:44:17 -0400 Subject: [PATCH 114/232] drm/amd/powerplay: Port vega10_hwmgr.c over to PP_CAP Replace and cleanup lengthy phm_cap_enabled() sequences with PP_CAP. Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 91 +++++++------------ 1 file changed, 32 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d56e29a0c40b6..4d7bd9fc91a8f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -381,12 +381,10 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (!data->registry_data.socclk_dpm_key_disabled) data->smu_features[GNLD_DPM_SOCCLK].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM)) + if (PP_CAP(PHM_PlatformCaps_UVDDPM)) data->smu_features[GNLD_DPM_UVD].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEDPM)) + if (PP_CAP(PHM_PlatformCaps_VCEDPM)) data->smu_features[GNLD_DPM_VCE].supported = true; if (!data->registry_data.pcie_dpm_key_disabled) @@ -395,9 +393,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (!data->registry_data.dcefclk_dpm_key_disabled) data->smu_features[GNLD_DPM_DCEFCLK].supported = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep) && - data->registry_data.sclk_deep_sleep_support) { + if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) && + data->registry_data.sclk_deep_sleep_support) { data->smu_features[GNLD_DS_GFXCLK].supported = true; data->smu_features[GNLD_DS_SOCCLK].supported = true; data->smu_features[GNLD_DS_LCLK].supported = true; @@ -497,8 +494,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr) if (!vega10_get_socclk_for_voltage_evv(hwmgr, table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { + if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) { for (j = 1; j < socclk_table->count; j++) { if (socclk_table->entries[j].clk == sclk && socclk_table->entries[j].cks_enable == 0) { @@ -809,8 +805,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } /* VDDCI_MEM */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { + if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) { if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, 
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO; @@ -1382,10 +1377,8 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table)); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { data->odn_dpm_table.odn_core_clock_dpm_levels. number_of_performance_levels = data->dpm_table.gfx_table.count; for (i = 0; i < data->dpm_table.gfx_table.count; i++) { @@ -2332,9 +2325,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params); if (!result) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - (data->registry_data.regulator_hot_gpio_support)) { + if (PP_CAP(PHM_PlatformCaps_RegulatorHot) && + data->registry_data.regulator_hot_gpio_support) { pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio; pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity; pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio; @@ -2346,9 +2338,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) pp_table->VR1HotPolarity = 0; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition) && - (data->registry_data.ac_dc_switch_gpio_support)) { + if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) && + data->registry_data.ac_dc_switch_gpio_support) { pp_table->AcDcGpio = gpio_params.ucAcDcGpio; pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity; } else { @@ -2646,8 +2637,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) { + if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) { if (data->smu_features[GNLD_VR0HOT].supported) { PP_ASSERT_WITH_CODE( !vega10_enable_smc_features(hwmgr->smumgr, @@ -2861,8 +2851,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) data->vbios_boot_state.bsoc_vddc_lock = false; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { + if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) { if (data->smu_features[GNLD_ACDC].supported) { PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_ACDC].smu_feature_bitmap), @@ -2905,8 +2894,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to initialize SMC table!", result = tmp_result); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) { + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { tmp_result = vega10_enable_thermal_protection(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to enable thermal protection!", @@ -3141,8 +3129,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + if (PP_CAP(PHM_PlatformCaps_StablePState)) { PP_ASSERT_WITH_CODE( 
data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 && data->registry_data.stable_pstate_sclk_dpm_percentage <= 100, @@ -3207,10 +3194,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, disable_mclk_switching_for_frame_lock = phm_cap_enabled( hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchForVR); - force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ForceMclkHigh); + disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); + force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); disable_mclk_switching = (info.display_count > 1) || disable_mclk_switching_for_frame_lock || @@ -3261,8 +3246,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, vega10_ps->performance_levels[1].mem_clock; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { + if (PP_CAP(PHM_PlatformCaps_StablePState)) { for (i = 0; i < vega10_ps->performance_level_count; i++) { vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk; vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk; @@ -3294,10 +3278,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co data->need_update_dpm_table = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { for (i = 0; i < sclk_table->count; i++) { if (sclk == sclk_table->dpm_levels[i].value) break; @@ -3381,10 +3363,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( uint32_t dpm_count, clock_percent; uint32_t i; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || + PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { if (!data->need_update_dpm_table && !data->apply_optimized_settings && @@ -3449,10 +3429,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( dpm_table-> gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. value = sclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || + PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) { /* Need to do calculation based on the golden DPM table * as the Heatmap GPU Clock axis is also based on * the default values @@ -3506,10 +3484,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( mem_table.dpm_levels[dpm_table->mem_table.count - 1]. 
value = mclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { + if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) || + PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) { PP_ASSERT_WITH_CODE( golden_dpm_table->mem_table.dpm_levels @@ -3840,9 +3816,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) int result = 0; uint32_t low_sclk_interrupt_threshold = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != + if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) && + (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; @@ -4253,8 +4228,7 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); break; case AMD_FAN_CTRL_MANUAL: - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); break; case AMD_FAN_CTRL_AUTO: @@ -4798,7 +4772,7 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg if (data->display_timing.num_existing_displays != info.display_count) is_update_required = true; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) { if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) is_update_required = true; } @@ -4815,8 +4789,7 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) "DPM is not running right now, no need to disable DPM!", return 0); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) + if (PP_CAP(PHM_PlatformCaps_ThermalController)) vega10_disable_thermal_protection(hwmgr); tmp_result = vega10_disable_power_containment(hwmgr); From 0f26b7b03cbb394451dd1c771a4e0024b805df90 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 07:46:40 -0400 Subject: [PATCH 115/232] drm/amd/powerplay: Port vega10_powertune.c over to PP_CAP Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../amd/powerplay/hwmgr/vega10_powertune.c | 47 +++++++++---------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index c5ff94f4fd0ff..0090ff1bf416d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -854,69 +854,69 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) uint32_t en = (enable ? 
1 : 0); uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping)) { CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, DIDT_SQ_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~SQ_Enable_MASK; didt_block_info |= en << SQ_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + if (PP_CAP(PHM_PlatformCaps_DBRamping)) { CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, DIDT_DB_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~DB_Enable_MASK; didt_block_info |= en << DB_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + if (PP_CAP(PHM_PlatformCaps_TDRamping)) { CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, DIDT_TD_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TD_Enable_MASK; didt_block_info |= en << TD_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, DIDT_TCP_CTRL0, DIDT_CTRL_EN, en); didt_block_info &= ~TCP_Enable_MASK; didt_block_info |= en << TCP_Enable_SHIFT; } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, DIDT_DBR_CTRL0, DIDT_CTRL_EN, en); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + if (PP_CAP(PHM_PlatformCaps_DBRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + if (PP_CAP(PHM_PlatformCaps_TDRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + if 
(PP_CAP(PHM_PlatformCaps_DBRRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); @@ -1020,10 +1020,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) + if (PP_CAP(PHM_PlatformCaps_GCEDC)) vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); return 0; @@ -1039,12 +1039,12 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); return 0; @@ -1139,12 +1139,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10); vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); return 0; @@ -1160,12 +1160,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) cgs_enter_safe_mode(hwmgr->device, false); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data); } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + if (PP_CAP(PHM_PlatformCaps_PSM)) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); return 0; @@ -1361,8 +1361,7 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_PPT].smu_feature_bitmap), @@ -1389,8 +1388,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, false, 
data->smu_features[GNLD_PPT].smu_feature_bitmap), @@ -1418,8 +1416,7 @@ int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) { int adjust_percent, result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? hwmgr->platform_descriptor.TDPAdjustment : From f85a49bdb6eed2705a7e5097e5bd1ee0e8da1d9f Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 07:48:16 -0400 Subject: [PATCH 116/232] drm/amd/powerplay: Port vega10_thermal.c over to PP_CAP Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d8551ae79f538..eee143ad60a25 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -54,8 +54,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, fan_speed_info->min_percent = 0; fan_speed_info->max_percent = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) && hwmgr->thermal_controller.fanInfo. ucTachometerPulsesPerRevolution) { fan_speed_info->supports_rpm_read = true; @@ -279,8 +278,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (speed > 100) speed = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_stop_smc_fan_control(hwmgr); reg = soc15_get_register_offset(THM_HWID, 0, @@ -319,8 +317,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); } else result = vega10_fan_ctrl_set_default_mode(hwmgr); @@ -346,8 +343,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return -1; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); if (!result) { @@ -627,10 +623,8 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, * this function was included in the table. * Make sure that we still think controlling the fan is OK. 
*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_start_smc_fan_control(hwmgr); - } return 0; } From 8c755d9abc158799c1b1087da3c823ff82048fbb Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 10:36:40 -0400 Subject: [PATCH 117/232] drm/amd/powerplay: Tidy up vega10_fan_ctrl_get_fan_speed_rpm() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index eee143ad60a25..f62b85237236e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -104,14 +104,15 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (hwmgr->thermal_controller.fanInfo.bNoFan) return -1; - if (data->smu_features[GNLD_FAN_CONTROL].supported) + if (data->smu_features[GNLD_FAN_CONTROL].supported) { result = vega10_get_current_rpm(hwmgr, speed); - else { + } else { uint32_t reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); - tach_period = (cgs_read_register(hwmgr->device, - reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> - CG_TACH_STATUS__TACH_PERIOD__SHIFT; + tach_period = + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_STATUS, + TACH_PERIOD); if (tach_period == 0) return -EINVAL; From 893c3880e4f0f1fd405af2096e9052a5be3b06e2 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 12:20:48 -0400 Subject: [PATCH 118/232] drm/amd/powerplay: Tidy up vega10_fan_ctrl_set_static_mode() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_thermal.c | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index f62b85237236e..4e364035945c4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -141,23 +141,20 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) if (hwmgr->fan_ctrl_is_in_default_mode) { hwmgr->fan_ctrl_default_mode = - (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> - CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; - hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL2__TMIN_MASK) >> - CG_FDO_CTRL2__TMIN__SHIFT; + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = + CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN); hwmgr->fan_ctrl_is_in_default_mode = false; } cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TMIN_MASK) | - (0 << CG_FDO_CTRL2__TMIN__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN, 0)); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | - (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE, mode)); return 0; } From 0c69dd0a55cd0b5c6a75728c509ca40d1e16e0b7 Mon Sep 17 00:00:00 2001 From: 
Tom St Denis Date: Thu, 7 Sep 2017 12:26:42 -0400 Subject: [PATCH 119/232] drm/amd/powerplay: Tidy up vega10_fan_ctrl_set_default_mode() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 4e364035945c4..cda7fcfeefd7b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -173,14 +173,13 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) if (!hwmgr->fan_ctrl_is_in_default_mode) { cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | - (hwmgr->fan_ctrl_default_mode << - CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, FDO_PWM_MODE, + hwmgr->fan_ctrl_default_mode)); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TMIN_MASK) | - (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TMIN, + hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); hwmgr->fan_ctrl_is_in_default_mode = true; } From 0dba3739a72362e41b84f0751d5f51f981c5ff07 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 12:41:51 -0400 Subject: [PATCH 120/232] drm/amd/powerplay: Tidy up vega10_fan_ctrl_set_fan_speed_percent() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index cda7fcfeefd7b..7a25e226d7d3c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -281,9 +281,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); - duty100 = (cgs_read_register(hwmgr->device, reg) & - CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> - CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL1, FMAX_DUTY100); if (duty100 == 0) return -EINVAL; @@ -295,9 +294,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) | - (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } From 1f9ba3bec60fdebae43e8076a497928a40a6b0ca Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 12:42:38 -0400 Subject: [PATCH 121/232] drm/amd/powerplay: Fix indentation in vega10_fan_ctrl_reset_fan_speed_to_default() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 7a25e226d7d3c..664133eeb59c3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -307,17 +307,13 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, */ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) { - int result; - if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; - if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { - result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = vega10_fan_ctrl_set_default_mode(hwmgr); - - return result; + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + return vega10_fan_ctrl_start_smc_fan_control(hwmgr); + else + return vega10_fan_ctrl_set_default_mode(hwmgr); } /** From 657b1f469744ea7fe891dbd037bdd7af2fc8f777 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 12:59:32 -0400 Subject: [PATCH 122/232] drm/amd/powerplay: Tidy up vega10_fan_ctrl_set_fan_speed_rpm() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 664133eeb59c3..ff0a89ca72c2a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -330,8 +330,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan || - (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || - (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return -1; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) @@ -343,9 +343,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_TACH_STATUS__TACH_PERIOD_MASK) | - (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_STATUS, TACH_PERIOD, + tach_period)); } return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); } From 23db59e48aa8e5eb622afd9f698e5263fb72c464 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 7 Sep 2017 13:08:28 -0400 Subject: [PATCH 123/232] drm/amd/powerplay: Tidy up vega10_thermal_set_temperature_range() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_thermal.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index ff0a89ca72c2a..d5d676595f10e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -405,20 +405,10 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = cgs_read_register(hwmgr->device, reg); - val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK); - val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK); - val |= (1 << 
THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK); - val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) - << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT); - - val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); - val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) - << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); - + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); cgs_write_register(hwmgr->device, reg, val); From 298e87c95f9c8c2f33d274e92568c6b83ac999a5 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 8 Sep 2017 09:45:34 -0400 Subject: [PATCH 124/232] drm/amd/powerplay: Tidy up vega10_thermal_initialize() Signed-off-by: Tom St Denis Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d5d676595f10e..5b3c443d4e948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -429,19 +429,16 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) reg = soc15_get_register_offset(THM_HWID, 0, mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_TACH_CTRL__EDGE_PER_REV_MASK) | - ((hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution - 1) << - CG_TACH_CTRL__EDGE_PER_REV__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1)); } reg = soc15_get_register_offset(THM_HWID, 0, mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); cgs_write_register(hwmgr->device, reg, - (cgs_read_register(hwmgr->device, reg) & - ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) | - (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT)); + CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28)); return 0; } From 29c3035fe385b4214fc0515b9cd0ff53d23b4e82 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 2 Sep 2017 02:16:35 -0400 Subject: [PATCH 125/232] drm/amdgpu/gfx9: properly set the hdp flush reg for Raven Was only being assigned for vega10. 
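To make that one-liner concrete, here is a reconstruction of the failure mode rather than code quoted from the patch; the identifiers are the ones used in the hunk below, and the rest of the function body is elided.

/* Pre-patch logic in gfx_v9_0_ring_emit_hdp_flush(), reconstructed: */
struct nbio_hdp_flush_reg *nbio_hf_reg;		/* no initializer */

if (ring->adev->asic_type == CHIP_VEGA10)	/* false on Raven */
	nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;

/* Everything after this point reads fields through nbio_hf_reg, so on
 * Raven it dereferenced an uninitialized pointer.  The fix keys the
 * choice off AMD_IS_APU instead: Raven (an APU) picks up the nbio v7.0
 * register set, while Vega10 and other dGPUs keep using nbio v6.1.
 */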
Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2ab049c45b1d8..72dbf890c65b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3504,7 +3504,9 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask, reg_mem_engine; struct nbio_hdp_flush_reg *nbio_hf_reg; - if (ring->adev->asic_type == CHIP_VEGA10) + if (ring->adev->flags & AMD_IS_APU) + nbio_hf_reg = &nbio_v7_0_hdp_flush_reg; + else nbio_hf_reg = &nbio_v6_1_hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { From 4a00f21db800bc64264bb6764c3d0d0878e9f4c4 Mon Sep 17 00:00:00 2001 From: Himanshu Jha Date: Mon, 11 Sep 2017 18:07:26 +0530 Subject: [PATCH 126/232] drm/amd/powerplay: remove unnecessary call to memset A call to memset to zero memory immediately after allocating it with kzalloc is unnecessary, as kzalloc already returns zeroed memory. Semantic patch used to resolve this issue: @@ expression e,e2; constant c; statement S; @@ e = kzalloc(e2, c); if(e == NULL) S - memset(e, 0, e2); Signed-off-by: Himanshu Jha Signed-off-by: Alex Deucher --- .../powerplay/hwmgr/process_pptables_v1_0.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 84f01fd33aff7..d1af1483c69ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -173,8 +173,6 @@ static int get_vddc_lookup_table( if (NULL == table) return -ENOMEM; - memset(table, 0x00, table_size); - table->count = vddc_lookup_pp_tables->ucNumEntries; for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) { @@ -335,8 +333,6 @@ static int get_valid_clk( if (NULL == table) return -ENOMEM; - memset(table, 0x00, table_size); - table->count = (uint32_t)clk_volt_pp_table->count; for (i = 0; i < table->count; i++) { @@ -390,8 +386,6 @@ static int get_mclk_voltage_dependency_table( if (NULL == mclk_table) return -ENOMEM; - memset(mclk_table, 0x00, table_size); - mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { @@ -439,8 +433,6 @@ static int get_sclk_voltage_dependency_table( if (NULL == sclk_table) return -ENOMEM; - memset(sclk_table, 0x00, table_size); - sclk_table->count = (uint32_t)tonga_table->ucNumEntries; for (i = 0; i < tonga_table->ucNumEntries; i++) { @@ -473,8 +465,6 @@ static int get_sclk_voltage_dependency_table( if (NULL == sclk_table) return -ENOMEM; - memset(sclk_table, 0x00, table_size); - sclk_table->count = (uint32_t)polaris_table->ucNumEntries; for (i = 0; i < polaris_table->ucNumEntries; i++) { @@ -525,8 +515,6 @@ static int get_pcie_table( if (pcie_table == NULL) return -ENOMEM; - memset(pcie_table, 0x00, table_size); - /* * Make sure the number of pcie entries are less than or equal to sclk dpm levels. * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1. @@ -567,8 +555,6 @@ static int get_pcie_table( if (pcie_table == NULL) return -ENOMEM; - memset(pcie_table, 0x00, table_size); - /* * Make sure the number of pcie entries are less than or equal to sclk dpm levels. * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
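For readers skimming the semantic patch in the commit message above, the point is simply that kzalloc() already returns zeroed memory, so a memset() right after the allocation is dead code. A minimal standalone sketch, where the struct and function are invented and only kzalloc, memset and GFP_KERNEL are real kernel APIs:

struct example_table {
	uint32_t count;
	uint32_t entries[8];
};

static int example_alloc(struct example_table **out)
{
	struct example_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (table == NULL)
		return -ENOMEM;

	/* Redundant: every byte of *table is already zero after kzalloc().
	 * This is exactly the pattern the cocci script above deletes.
	 */
	memset(table, 0x00, sizeof(*table));

	*out = table;
	return 0;
}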
@@ -615,8 +601,6 @@ static int get_cac_tdp_table( if (NULL == tdp_table) return -ENOMEM; - memset(tdp_table, 0x00, table_size); - hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL); if (NULL == hwmgr->dyn_state.cac_dtp_table) { @@ -624,8 +608,6 @@ static int get_cac_tdp_table( return -ENOMEM; } - memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size); - if (table->ucRevId < 3) { const ATOM_Tonga_PowerTune_Table *tonga_table = (ATOM_Tonga_PowerTune_Table *)table; @@ -725,8 +707,6 @@ static int get_mm_clock_voltage_table( if (NULL == mm_table) return -ENOMEM; - memset(mm_table, 0x00, table_size); - mm_table->count = mm_dependency_table->ucNumEntries; for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { From 4e55eb3879fea6d8c7d414cebaa5bff1da58b4a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 11 Sep 2017 16:54:59 +0200 Subject: [PATCH 127/232] drm/amdgpu: fix amdgpu_vm_handle_moved as well v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no guarantee that the last BO_VA actually needed an update. Additional to that all command submissions must wait for moved BOs to be cleared, not just the first one. v2: Don't overwrite any newer fence. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24 ++++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +-- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ff61073b7181f..9f1202a4182f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -814,7 +814,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync); + r = amdgpu_vm_handle_moved(adev, vm); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 64baa31389657..2df254cc802e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, dma_addr_t *pages_addr = NULL; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; - struct dma_fence *exclusive; + struct dma_fence *exclusive, **last_update; uint64_t flags; int r; @@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, else flags = 0x0; + if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)) + last_update = &vm->last_update; + else + last_update = &bo_va->last_pt_update; + if (!clear && bo_va->base.moved) { bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); @@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, - &bo_va->last_pt_update); + last_update); if (r) return r; } @@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, trace_amdgpu_vm_bo_mapping(mapping); } - if (bo_va->base.bo && - bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) { - dma_fence_put(vm->last_update); - vm->last_update = dma_fence_get(bo_va->last_pt_update); - } - return 0; } @@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, * PTs have to be reserved! 
*/ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_sync *sync) + struct amdgpu_vm *vm) { - struct amdgpu_bo_va *bo_va = NULL; bool clear; int r = 0; spin_lock(&vm->status_lock); while (!list_empty(&vm->moved)) { + struct amdgpu_bo_va *bo_va; + bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); @@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, } spin_unlock(&vm->status_lock); - if (bo_va) - r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index cb6a6222fc3f1..48c58ae4bb3a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -250,8 +250,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); int amdgpu_vm_handle_moved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_sync *sync); + struct amdgpu_vm *vm); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); From 8b39f031b7a65e06d5de99170fa056ea14eaca54 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 09:46:40 -0400 Subject: [PATCH 128/232] drm/amd/powerplay: Tidy up smu7_fan_ctrl_get_fan_speed_info() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index baddb569a8b82..a491f1201fdad 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -37,9 +37,8 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, fan_speed_info->min_percent = 0; fan_speed_info->max_percent = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) && + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { fan_speed_info->supports_rpm_read = true; fan_speed_info->supports_rpm_write = true; fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; @@ -581,4 +580,4 @@ void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr) phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller)); return; -} \ No newline at end of file +} From 3efabd5eda4a47eb5dbe0a6f0e7f700252e43965 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 09:51:36 -0400 Subject: [PATCH 129/232] drm/amd/powerplay: Tidy up smu7_fan_ctrl_get_fan_speed_rpm() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index a491f1201fdad..be8367997e75e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -86,8 +86,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) uint32_t crystal_clock_freq; if (hwmgr->thermal_controller.fanInfo.bNoFan || - 
(hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0)) + !hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) return -ENODEV; tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, From 30f111fca087b8f644f134eb7fb062614ff08416 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 09:53:25 -0400 Subject: [PATCH 130/232] drm/amd/powerplay: Make use of PP_CAP in smu7_thermal.c Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index be8367997e75e..9425f0400a6a5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -150,13 +150,11 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) { int result; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport)) { + if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) { cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM)) + if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM)) hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanRPM); @@ -207,8 +205,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (speed > 100) speed = 100; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -239,8 +236,7 @@ int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); if (!result) result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); @@ -268,8 +264,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); crystal_clock_freq = smu7_get_xclk(hwmgr); @@ -429,8 +424,7 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, * this function was included in the table. * Make sure that we still think controlling the fan is OK. 
*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { smu7_fan_ctrl_start_smc_fan_control(hwmgr); smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } From a10ad69c8357b51337aa3acea63d279b38fc198c Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 09:56:45 -0400 Subject: [PATCH 131/232] drm/amd/powerplay: Remove unneeded return from pp_smu7_thermal_fini() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 9425f0400a6a5..a457b884dd7d7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -572,5 +572,4 @@ void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr) { phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller)); - return; } From 0120ad13acb747b6e5e4f21d525818797dc5eeaa Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 10:01:34 -0400 Subject: [PATCH 132/232] drm/amd/powerplay: Make use of PP_CAP in smu7_powertune.c Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/smu7_powertune.c | 42 ++++++++----------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 1dc31aa727817..9b4189cd0851a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -636,7 +636,7 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) else didt_block = DIDTBlock_Info; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0; + block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0; data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; @@ -645,7 +645,7 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block &= ~SQ_Enable_MASK; didt_block |= block_en << SQ_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0; + block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0; data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; @@ -654,7 +654,7 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block &= ~DB_Enable_MASK; didt_block |= block_en << DB_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0; + block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0; data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); @@ -662,7 +662,7 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block &= ~TD_Enable_MASK; didt_block |= block_en << TD_Enable_SHIFT; - block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? 
en : 0; + block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? en : 0; data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; @@ -753,10 +753,10 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) if (result == 0) num_se = sys_info.value; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping) || + PP_CAP(PHM_PlatformCaps_DBRamping) || + PP_CAP(PHM_PlatformCaps_TDRamping) || + PP_CAP(PHM_PlatformCaps_TCPRamping)) { cgs_enter_safe_mode(hwmgr->device, true); value = 0; @@ -808,10 +808,10 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) { int result; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + if (PP_CAP(PHM_PlatformCaps_SQRamping) || + PP_CAP(PHM_PlatformCaps_DBRamping) || + PP_CAP(PHM_PlatformCaps_TDRamping) || + PP_CAP(PHM_PlatformCaps_TCPRamping)) { cgs_enter_safe_mode(hwmgr->device, true); @@ -836,8 +836,7 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { + if (PP_CAP(PHM_PlatformCaps_CAC)) { int smc_result; smc_result = smum_send_msg_to_smc(hwmgr->smumgr, (uint16_t)(PPSMC_MSG_EnableCac)); @@ -854,8 +853,7 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { + if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) { int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, (uint16_t)(PPSMC_MSG_DisableCac)); PP_ASSERT_WITH_CODE((smc_result == 0), @@ -899,9 +897,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) else cac_table = hwmgr->dyn_state.cac_dtp_table; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->enable_tdc_limit_feature) { smc_result = smum_send_msg_to_smc(hwmgr->smumgr, (uint16_t)(PPSMC_MSG_TDCLimitEnable)); @@ -937,9 +933,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - data->power_containment_features) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { int smc_result; if (data->power_containment_features & @@ -987,8 +982,7 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) cac_table = table_info->cac_dtp_table; else cac_table = hwmgr->dyn_state.cac_dtp_table; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { /* 
adjustment percentage has already been validated */ adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? hwmgr->platform_descriptor.TDPAdjustment : From 9f8ccae9d6c76cc50aca2efacd7cd65b6146daaf Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 10:05:48 -0400 Subject: [PATCH 133/232] drm/amd/powerplay: Tidy up smu7_enable_didt() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/smu7_powertune.c | 29 +++++-------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 9b4189cd0851a..8c264c195e1a5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -629,7 +629,6 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) uint32_t block_en = 0; int32_t result = 0; uint32_t didt_block; - uint32_t data; if (hwmgr->chip_id == CHIP_POLARIS11) didt_block = Polaris11_DIDTBlock_Info; @@ -637,41 +636,29 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block = DIDTBlock_Info; block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~SQ_Enable_MASK; didt_block |= block_en << SQ_Enable_SHIFT; block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_DB_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~DB_Enable_MASK; didt_block |= block_en << DB_Enable_SHIFT; block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0; - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TD_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~TD_Enable_MASK; didt_block |= block_en << TD_Enable_SHIFT; block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? 
en : 0; - - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT, + DIDT_TCP_CTRL0, DIDT_CTRL_EN, block_en); didt_block &= ~TCP_Enable_MASK; didt_block |= block_en << TCP_Enable_SHIFT; - if (enable) result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block); From c9232d9ee85fade8a2020efe9f3bf64cdbbc6347 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 12 Sep 2017 12:29:06 -0400 Subject: [PATCH 134/232] drm/amd/powerplay: lock grbm_gfx index when changing instance Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 8c264c195e1a5..0f75af33e581b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -746,6 +746,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_CAP(PHM_PlatformCaps_TCPRamping)) { cgs_enter_safe_mode(hwmgr->device, true); + cgs_lock_grbm_idx(hwmgr->device, true); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { @@ -785,6 +786,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == result), "Failed to enable DPM DIDT.", return result); } + cgs_lock_grbm_idx(hwmgr->device, false); cgs_enter_safe_mode(hwmgr->device, false); } From a072c5f896beba806b4b867d478e1b90f94ba29b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 11 Sep 2017 17:04:41 +0900 Subject: [PATCH 135/232] amdgpu: Only destroy fbdev framebuffer if it was initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes crash when trying to unload the amdgpu module before the fbdev framebuffer was initialized, which can happen since the DRM fbdev helper code supports deferred setup. 
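The underlying idiom is simply to tie the framebuffer teardown to the same condition that proves it was ever set up: the pinned object pointer being non-NULL. A standalone sketch of that guard, with invented names:

  #include <stdio.h>

  struct fb_obj { int pinned; };

  struct fbdev_state {
  	struct fb_obj *obj;	/* NULL until the framebuffer is initialized */
  };

  static void fbdev_destroy(struct fbdev_state *st)
  {
  	/* only tear down what was actually brought up */
  	if (st->obj) {
  		st->obj->pinned = 0;
  		st->obj = NULL;
  		puts("framebuffer cleaned up");
  	} else {
  		puts("nothing to clean up (deferred setup never ran)");
  	}
  }

  int main(void)
  {
  	struct fbdev_state st = { .obj = NULL };

  	fbdev_destroy(&st);	/* safe even before setup happened */
  	return 0;
  }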
Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index b6cb276f0a709..562930b17a6d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb if (rfb->obj) { amdgpufb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; + drm_framebuffer_unregister_private(&rfb->base); + drm_framebuffer_cleanup(&rfb->base); } drm_fb_helper_fini(&rfbdev->helper); - drm_framebuffer_unregister_private(&rfb->base); - drm_framebuffer_cleanup(&rfb->base); return 0; } From e97f12f359775de4fabfb507f836ebffa20f4986 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 11 Sep 2017 17:09:17 +0900 Subject: [PATCH 136/232] radeon: Only destroy fbdev framebuffer if it was initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes crash when trying to unload the radeon module before the fbdev framebuffer was initialized, which can happen since the DRM fbdev helper code supports deferred setup. Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_fb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fd25361ac681b..2fcf805d3a169 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -322,10 +322,10 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; + drm_framebuffer_unregister_private(&rfb->base); + drm_framebuffer_cleanup(&rfb->base); } drm_fb_helper_fini(&rfbdev->helper); - drm_framebuffer_unregister_private(&rfb->base); - drm_framebuffer_cleanup(&rfb->base); return 0; } From 9f0ed7aab60e3563bfe247bc2ad82db3a88c2d57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 11 Sep 2017 15:51:30 +0200 Subject: [PATCH 137/232] drm/amdgpu: fix cgs alignment handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This always allocated on PAGE_SIZE alignment. 
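For reference, the usual handling around an explicit alignment parameter looks like the sketch below (standalone, names are illustrative): reject alignments that are not a power of two, then use the caller's value instead of a hardcoded PAGE_SIZE.

  #include <stdint.h>
  #include <stdio.h>

  /* true if align is a power of two (align == 1 is allowed) */
  static int align_is_pow2(uint64_t align)
  {
  	return align && !(align & (align - 1));
  }

  /* round size up to the requested alignment (align must be a power of two) */
  static uint64_t align_up(uint64_t size, uint64_t align)
  {
  	return (size + align - 1) & ~(align - 1);
  }

  int main(void)
  {
  	uint64_t align = 256, size = 1000;

  	if (!align_is_pow2(align))
  		return 1;
  	printf("aligned size: %llu\n", (unsigned long long)align_up(size, align));
  	return 0;
  }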
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index fd435a96481c2..892cd8b3483c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -121,7 +121,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, placement.busy_placement = &place; placement.num_busy_placement = 1; - ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, + ret = amdgpu_bo_create_restricted(adev, size, align, true, domain, flags, NULL, &placement, NULL, 0, &obj); From 88531913a841a6354adfb40c78c86599639e5f32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 11 Sep 2017 17:10:26 +0200 Subject: [PATCH 138/232] drm/amd: remove min/max addr handling from cgs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Nobody is actually using this and it causes a bunch of unused and buggy code. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 48 ++----------------- drivers/gpu/drm/amd/include/cgs_common.h | 7 +-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 2 +- 3 files changed, 6 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 892cd8b3483c5..df3bf22039d58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -45,7 +45,6 @@ struct amdgpu_cgs_device { static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type, uint64_t size, uint64_t align, - uint64_t min_offset, uint64_t max_offset, cgs_handle_t *handle) { CGS_FUNC_ADEV; @@ -53,13 +52,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, int ret = 0; uint32_t domain = 0; struct amdgpu_bo *obj; - struct ttm_placement placement; - struct ttm_place place; - - if (min_offset > max_offset) { - BUG_ON(1); - return -EINVAL; - } /* fail if the alignment is not a power of 2 */ if (((align != 1) && (align & (align - 1))) @@ -73,41 +65,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; domain = AMDGPU_GEM_DOMAIN_VRAM; - if (max_offset > adev->mc.real_vram_size) - return -EINVAL; - place.fpfn = min_offset >> PAGE_SHIFT; - place.lpfn = max_offset >> PAGE_SHIFT; - place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; break; case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: case CGS_GPU_MEM_TYPE__INVISIBLE_FB: flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; domain = AMDGPU_GEM_DOMAIN_VRAM; - if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { - place.fpfn = - max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT; - place.lpfn = - min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT; - place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; - } - break; case CGS_GPU_MEM_TYPE__GART_CACHEABLE: domain = AMDGPU_GEM_DOMAIN_GTT; - place.fpfn = min_offset >> PAGE_SHIFT; - place.lpfn = max_offset >> PAGE_SHIFT; - place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; break; case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; domain = AMDGPU_GEM_DOMAIN_GTT; - place.fpfn = min_offset 
>> PAGE_SHIFT; - place.lpfn = max_offset >> PAGE_SHIFT; - place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | - TTM_PL_FLAG_UNCACHED; break; default: return -EINVAL; @@ -116,15 +86,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, *handle = 0; - placement.placement = &place; - placement.num_placement = 1; - placement.busy_placement = &place; - placement.num_busy_placement = 1; - - ret = amdgpu_bo_create_restricted(adev, size, align, - true, domain, flags, - NULL, &placement, NULL, - 0, &obj); + ret = amdgpu_bo_create(adev, size, align, true, domain, flags, + NULL, NULL, 0, &obj); if (ret) { DRM_ERROR("(%d) bo create failed\n", ret); return ret; @@ -155,19 +118,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h uint64_t *mcaddr) { int r; - u64 min_offset, max_offset; struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; WARN_ON_ONCE(obj->placement.num_placement > 1); - min_offset = obj->placements[0].fpfn << PAGE_SHIFT; - max_offset = obj->placements[0].lpfn << PAGE_SHIFT; - r = amdgpu_bo_reserve(obj, true); if (unlikely(r != 0)) return r; - r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains, - min_offset, max_offset, mcaddr); + r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr); amdgpu_bo_unreserve(obj); return r; } diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 92eaa81f9fcba..2c1f13e04726e 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -193,8 +193,6 @@ struct cgs_acpi_method_info { * @type: memory type * @size: size in bytes * @align: alignment in bytes - * @min_offset: minimum offset from start of heap - * @max_offset: maximum offset from start of heap * @handle: memory handle (output) * * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous @@ -216,7 +214,6 @@ struct cgs_acpi_method_info { */ typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type, uint64_t size, uint64_t align, - uint64_t min_offset, uint64_t max_offset, cgs_handle_t *handle); /** @@ -479,8 +476,8 @@ struct cgs_device #define CGS_OS_CALL(func,dev,...) 
\ (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__)) -#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \ - CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle) +#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \ + CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle) #define cgs_free_gpu_mem(dev,handle) \ CGS_CALL(free_gpu_mem,dev,handle) #define cgs_gmap_gpu_mem(dev,handle,mcaddr) \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 3bdf6478de7fa..e397349ce1c98 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -316,7 +316,7 @@ int smu_allocate_memory(void *device, uint32_t size, return -EINVAL; ret = cgs_alloc_gpu_mem(device, type, size, byte_align, - 0, 0, (cgs_handle_t *)handle); + (cgs_handle_t *)handle); if (ret) return -ENOMEM; From c09312a6532a9a976ec4e72eb3b7fa10e87a8b07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 12 Sep 2017 10:56:17 +0200 Subject: [PATCH 139/232] drm/amdgpu: fix and cleanup amdgpu_bo_create v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We adjusted the BO flags for USWC handling, but those never took effect because the placement was passed in instead of generated inside this function. v2: better commit message Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 83 ++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 8 --- 2 files changed, 23 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 52d0109c0d9c6..726a662f43f49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -64,11 +64,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) return false; } -static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, - struct ttm_placement *placement, - struct ttm_place *places, - u32 domain, u64 flags) +void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) { + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct ttm_placement *placement = &abo->placement; + struct ttm_place *places = abo->placements; + u64 flags = abo->flags; u32 c = 0; if (domain & AMDGPU_GEM_DOMAIN_VRAM) { @@ -151,27 +152,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, placement->busy_placement = places; } -void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); - - amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements, - domain, abo->flags); -} - -static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, - struct ttm_placement *placement) -{ - BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1)); - - memcpy(bo->placements, placement->placement, - placement->num_placement * sizeof(struct ttm_place)); - bo->placement.num_placement = placement->num_placement; - bo->placement.num_busy_placement = placement->num_busy_placement; - bo->placement.placement = bo->placements; - bo->placement.busy_placement = bo->placements; -} - /** * amdgpu_bo_create_reserved - create reserved BO for kernel use * @@ -303,14 +283,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, *cpu_addr = NULL; } -int amdgpu_bo_create_restricted(struct 
amdgpu_device *adev, - unsigned long size, int byte_align, - bool kernel, u32 domain, u64 flags, - struct sg_table *sg, - struct ttm_placement *placement, - struct reservation_object *resv, - uint64_t init_value, - struct amdgpu_bo **bo_ptr) +static int amdgpu_bo_do_create(struct amdgpu_device *adev, + unsigned long size, int byte_align, + bool kernel, u32 domain, u64 flags, + struct sg_table *sg, + struct reservation_object *resv, + uint64_t init_value, + struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; enum ttm_bo_type type; @@ -384,10 +363,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; #endif - amdgpu_fill_placement_to_bo(bo, placement); - /* Kernel allocation are uninterruptible */ + bo->tbo.bdev = &adev->mman.bdev; + amdgpu_ttm_placement_from_domain(bo, domain); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); + /* Kernel allocation are uninterruptible */ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); @@ -442,27 +422,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, unsigned long size, int byte_align, struct amdgpu_bo *bo) { - struct ttm_placement placement = {0}; - struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; int r; if (bo->shadow) return 0; - memset(&placements, 0, sizeof(placements)); - amdgpu_ttm_placement_init(adev, &placement, placements, - AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, true, - AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW, - NULL, &placement, - bo->tbo.resv, - 0, - &bo->shadow); + r = amdgpu_bo_do_create(adev, size, byte_align, true, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, + NULL, bo->tbo.resv, 0, + &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); mutex_lock(&adev->shadow_list_lock); @@ -484,18 +454,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev, uint64_t init_value, struct amdgpu_bo **bo_ptr) { - struct ttm_placement placement = {0}; - struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; int r; - memset(&placements, 0, sizeof(placements)); - amdgpu_ttm_placement_init(adev, &placement, placements, - domain, parent_flags); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, - parent_flags, sg, &placement, resv, - init_value, bo_ptr); + r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, + parent_flags, sg, resv, init_value, bo_ptr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a4891bea2ca8c..39b6bf6fb051f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -195,14 +195,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, struct reservation_object *resv, uint64_t init_value, struct amdgpu_bo **bo_ptr); -int amdgpu_bo_create_restricted(struct amdgpu_device *adev, - unsigned long size, int byte_align, - bool kernel, u32 domain, u64 flags, - struct sg_table *sg, - struct ttm_placement *placement, - struct reservation_object *resv, - uint64_t init_value, - struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_reserved(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, From 
0d2bd2ae045d8dcb446a3d9a4cecefa70428573a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 13 Sep 2017 10:43:09 +0200 Subject: [PATCH 140/232] drm/ttm: fix memory leak while individualizing BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to free the reservation object before we take the BO from the delayed delete list. Signed-off-by: Christian König Reviewed-by: Monk Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index bee77d31895b3..d79607a1187c8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -557,6 +557,8 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, } ttm_bo_del_from_lru(bo); + if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv)) + reservation_object_fini(&bo->ttm_resv); list_del_init(&bo->ddestroy); kref_put(&bo->list_kref, ttm_bo_ref_bug); From c30572814df55bc648ca449f69849b988abff54a Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 13 Sep 2017 12:35:15 -0400 Subject: [PATCH 141/232] drm/amd/amdgpu: Change vram debugfs to NO_KIQ for VM environments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fe887f361be83..b2b11e1766671 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1696,9 +1696,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, return result; spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32(mmMM_INDEX_HI, *pos >> 31); - value = RREG32(mmMM_DATA); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); + value = RREG32_NO_KIQ(mmMM_DATA); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); r = put_user(value, (uint32_t *)buf); @@ -1739,9 +1739,9 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, return r; spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32(mmMM_INDEX_HI, *pos >> 31); - WREG32(mmMM_DATA, value); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); + WREG32_NO_KIQ(mmMM_DATA, value); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); result += 4; From 376b6a1f4fbd4938042ec28bd9498c62a4bdcdf0 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Wed, 13 Sep 2017 13:31:09 +0530 Subject: [PATCH 142/232] drivers:gpu:Use ARRAY_SIZE() for the size calculation of the array. 
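ARRAY_SIZE() is the sizeof-division spelled once; a standalone sketch of the equivalent (the kernel's macro additionally refuses to compile when handed a pointer instead of an array):

  #include <stdio.h>

  /* simplified version of the kernel macro */
  #define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

  struct pctl_data { unsigned int index, data; };

  static const struct pctl_data pctl0_data[] = {
  	{ 0x135, 0x12a810 },
  	{ 0x149, 0x7a82c },
  };

  int main(void)
  {
  	/* replaces open-coded sizeof(pctl0_data)/sizeof(pctl0_data[0]) */
  	printf("%zu entries\n", ARRAY_SIZE(pctl0_data));
  	return 0;
  }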
Signed-off-by: Allen Pais Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 74cb647da30e0..7ff70762cfc82 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = { {0x135, 0x12a810}, {0x149, 0x7a82c} }; -#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0])) +#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data)) #define PCTL0_RENG_EXEC_END_PTR 0x151 #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 @@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = { {0x1f0, 0x5000a7f6}, {0x1f1, 0x5000a7e4} }; -#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0])) +#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data)) #define PCTL1_RENG_EXEC_END_PTR 0x1f1 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 From cf661ec1ff146183a6f8da1f1d41c1e3c4afe8bd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Sep 2017 17:36:19 -0400 Subject: [PATCH 143/232] drm/amdgpu/psp: declare raven psp firmware So it gets picked up properly by the kernel. Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 6ec5c9f8074d7..77cab1ff0254f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -35,6 +35,8 @@ #include "raven1/GC/gc_9_1_offset.h" #include "raven1/SDMA0/sdma0_4_1_offset.h" +MODULE_FIRMWARE("amdgpu/raven_asd.bin"); + static int psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) { From e88206465578c4a7b9ebf2a69d040c0dc0fbc920 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 15 Sep 2017 22:06:58 +0200 Subject: [PATCH 144/232] drm/radeon: properly initialize r600_audio_status() data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The structure returned from r600_audio_status() is only partially initialized, and older gcc versions (4.3 and 4.4) warn about this: drivers/gpu/drm/radeon/r600_hdmi.c: In function 'r600_audio_status': drivers/gpu/drm/radeon/r600_hdmi.c:108: error: 'status.id' is used uninitialized in this function drivers/gpu/drm/radeon/r600_hdmi.c:108: error: 'status.connected' is used uninitialized in this function drivers/gpu/drm/radeon/r600_hdmi.c:108: error: 'status.offset' is used uninitialized in this function This is harmless and surprisingly correct in C99, as the caller only accesses the fields that got initialized, so newer compilers don't warn about it, but initializing the entire structure feels like the right thing to do here and avoids the warning. 
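A standalone illustration of the difference (struct and field names are made up, loosely modeled on the warning text): with an empty or designated initializer every unnamed member is zeroed, whereas an uninitialized automatic struct leaves untouched fields indeterminate, which is what the old gcc warning is about.

  #include <stdio.h>

  struct audio_pin {
  	int channels;
  	int rate;
  	int bits_per_sample;
  	int id;		/* fields like these were left untouched ... */
  	int connected;	/* ... by the partial initialization */
  	int offset;
  };

  static struct audio_pin query_audio_status(void)
  {
  	/* empty initializer zeroes every member, as in the patch;
  	 * strict pre-C23 code would spell it { 0 } */
  	struct audio_pin status = {};

  	status.channels = 2;
  	status.rate = 48000;
  	status.bits_per_sample = 16;
  	return status;
  }

  int main(void)
  {
  	struct audio_pin s = query_audio_status();

  	/* .id, .connected and .offset are guaranteed to be 0 here */
  	printf("channels=%d id=%d connected=%d\n", s.channels, s.id, s.connected);
  	return 0;
  }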
Reviewed-by: Christian König Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e82a99cb24596..ab32830c4e237 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -58,7 +58,7 @@ enum r600_hdmi_iec_status_bits { static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) { - struct r600_audio_pin status; + struct r600_audio_pin status = {}; uint32_t value; value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); From 97bae49c44454549e7e22ef02020decafa099f04 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 14 Sep 2017 08:57:26 -0400 Subject: [PATCH 145/232] drm/amd/amdgpu: Support VM environments in amdgpu_ttm_access_memory() Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b2b11e1766671..8ee16dfdb8af3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1201,14 +1201,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, } spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); - WREG32(mmMM_INDEX_HI, aligned_pos >> 31); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); if (!write || mask != 0xffffffff) - value = RREG32(mmMM_DATA); + value = RREG32_NO_KIQ(mmMM_DATA); if (write) { value &= ~mask; value |= (*(uint32_t *)buf << shift) & mask; - WREG32(mmMM_DATA, value); + WREG32_NO_KIQ(mmMM_DATA, value); } spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); if (!write) { From 1ab4720441ed6a918c06c4ddf882ef8bc50c8ef0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 4 Sep 2017 16:25:48 +0800 Subject: [PATCH 146/232] drm/amd/powerplay: refine code for thermal control in powerplay add function point start_thermal_controller in hwmgr, delete thermal function table and related functions Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 6 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 + .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 +- .../drm/amd/powerplay/hwmgr/smu7_thermal.c | 146 +++--------------- .../drm/amd/powerplay/hwmgr/smu7_thermal.h | 5 +- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +- .../drm/amd/powerplay/hwmgr/vega10_thermal.c | 137 +++------------- .../drm/amd/powerplay/hwmgr/vega10_thermal.h | 13 +- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 3 +- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 6 +- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 6 +- 11 files changed, 55 insertions(+), 274 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index ce378bd216613..a3991c0dff2ec 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -306,8 +306,10 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRa range.min = temperature_range->min; } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - return 
phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), &range, NULL); + PHM_PlatformCaps_ThermalController) + && hwmgr->hwmgr_func->start_thermal_controller != NULL) + return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 4f1b932361b25..387d0b62100ca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -73,6 +73,7 @@ int hwmgr_early_init(struct pp_instance *handle) hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; hwmgr_init_default_caps(hwmgr); hwmgr_set_user_specify_caps(hwmgr); + hwmgr->fan_ctrl_is_in_default_mode = true; switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index dfe06d98304ce..c631b1926be76 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2284,7 +2284,6 @@ static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - pp_smu7_thermal_fini(hwmgr); kfree(hwmgr->backend); hwmgr->backend = NULL; @@ -2301,8 +2300,6 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return -ENOMEM; hwmgr->backend = data; - pp_smu7_thermal_initialize(hwmgr); - smu7_patch_voltage_workaround(hwmgr); smu7_init_dpm_defaults(hwmgr); @@ -4661,6 +4658,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .set_power_profile_state = smu7_set_power_profile_state, .avfs_control = smu7_avfs_control, .disable_smc_firmware_ctf = smu7_thermal_disable_alert, + .start_thermal_controller = smu7_start_thermal_controller, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index a457b884dd7d7..10e12b2b43858 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -360,7 +360,7 @@ static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr) * * @param hwmgr The address of the hardware manager. */ -int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) +static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) { uint32_t alert; @@ -371,7 +371,7 @@ int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to enable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); } /** @@ -416,8 +416,7 @@ int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) * @param Result the last failure code * @return result from set temperature range routine */ -static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +static int smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr) { /* If the fantable setup has failed we could have disabled * PHM_PlatformCaps_MicrocodeFanControl even after @@ -432,108 +431,34 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, return 0; } -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) { - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + int ret = 0; if (range == NULL) return -EINVAL; - return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return smu7_thermal_disable_alert(hwmgr); -} + smu7_thermal_initialize(hwmgr); + ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); + if (ret) + return -EINVAL; + smu7_thermal_enable_alert(hwmgr); + ret = smum_thermal_avfs_enable(hwmgr); + if (ret) + return -EINVAL; -static const struct phm_master_table_item -phm_thermal_start_thermal_controller_master_list[] = { - { .tableFunction = tf_smu7_thermal_initialize }, - { .tableFunction = tf_smu7_thermal_set_temperature_range }, - { .tableFunction = tf_smu7_thermal_enable_alert }, - { .tableFunction = smum_thermal_avfs_enable }, /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. 
*/ - { .tableFunction = smum_thermal_setup_fan_table }, - { .tableFunction = tf_smu7_thermal_start_smc_fan_control }, - { } -}; - -static const struct phm_master_table_header -phm_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - phm_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item -phm_thermal_set_temperature_range_master_list[] = { - { .tableFunction = tf_smu7_thermal_disable_alert }, - { .tableFunction = tf_smu7_thermal_set_temperature_range }, - { .tableFunction = tf_smu7_thermal_enable_alert }, - { } -}; - -static const struct phm_master_table_header -phm_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - phm_thermal_set_temperature_range_master_list -}; + smum_thermal_setup_fan_table(hwmgr); + smu7_thermal_start_smc_fan_control(hwmgr); + return 0; +} + + int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) { @@ -542,34 +467,3 @@ int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) return 0; } -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &phm_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &phm_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - -void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr) -{ - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller)); -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h index ba71b608fa752..42c1ba0fad785 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h @@ -46,14 +46,13 @@ extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr); -extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr); extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr); extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *temperature_range); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 4d7bd9fc91a8f..0eb3c2907de95 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -5060,6 +5060,6 @@ int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) { hwmgr->hwmgr_func = &vega10_hwmgr_funcs; hwmgr->pptable_func = &vega10_pptable_funcs; - pp_vega10_thermal_initialize(hwmgr); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 5b3c443d4e948..ce873e40a8fdb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -528,8 +528,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) * @param Result the last failure code * @return result from set temperature range routine */ -int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { int ret; struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -593,8 +592,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from set temperature range routine */ -int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr) { /* If the fantable setup has failed we could have disabled * PHM_PlatformCaps_MicrocodeFanControl even after @@ -607,107 +605,37 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, return 0; } -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) + +int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) { - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + int ret = 0; if (range == NULL) return -EINVAL; - return vega10_thermal_set_temperature_range(hwmgr, range); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return vega10_thermal_disable_alert(hwmgr); -} + vega10_thermal_initialize(hwmgr); + ret = vega10_thermal_set_temperature_range(hwmgr, range); + if (ret) + return -EINVAL; -static struct phm_master_table_item -vega10_thermal_start_thermal_controller_master_list[] = { - { .tableFunction = tf_vega10_thermal_initialize }, - { .tableFunction = tf_vega10_thermal_set_temperature_range }, - { .tableFunction = tf_vega10_thermal_enable_alert }, + vega10_thermal_enable_alert(hwmgr); /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. */ - { .tableFunction = tf_vega10_thermal_setup_fan_table }, - { .tableFunction = tf_vega10_thermal_start_smc_fan_control }, - { } -}; + ret = vega10_thermal_setup_fan_table(hwmgr); + if (ret) + return -EINVAL; -static struct phm_master_table_header -vega10_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - vega10_thermal_start_thermal_controller_master_list -}; + vega10_thermal_start_smc_fan_control(hwmgr); -static struct phm_master_table_item -vega10_thermal_set_temperature_range_master_list[] = { - { .tableFunction = tf_vega10_thermal_disable_alert }, - { .tableFunction = tf_vega10_thermal_set_temperature_range }, - { .tableFunction = tf_vega10_thermal_enable_alert }, - { } + return 0; }; -struct phm_master_table_header -vega10_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - vega10_thermal_set_temperature_range_master_list -}; + + int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) { @@ -717,32 +645,3 @@ int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) } return 0; } - -/** -* Initializes the thermal controller related functions -* in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. 
-*/ -int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &vega10_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &vega10_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, - &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h index 776f3a2effc0a..f34ce04cfd890 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h @@ -50,13 +50,6 @@ struct vega10_temperature { #define FDO_PWM_MODE_STATIC_RPM 5 -extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); - extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr); extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, @@ -69,7 +62,6 @@ extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr); extern int vega10_thermal_ctrl_uninitialize_thermal_controller( struct pp_hwmgr *hwmgr); extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, @@ -77,9 +69,10 @@ extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr); -int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); + +extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 3bbe7d5cb6de1..831add4486144 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -380,6 +380,7 @@ struct pp_hwmgr_func { int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); }; struct pp_table_func { @@ -766,8 +767,6 @@ struct pp_hwmgr { struct phm_runtime_table_header set_power_state; struct phm_runtime_table_header enable_clock_power_gatings; struct phm_runtime_table_header display_configuration_changed; - struct phm_runtime_table_header start_thermal_controller; - struct phm_runtime_table_header set_temperature_range; const struct pp_hwmgr_func *hwmgr_func; const struct pp_table_func *pptable_func; struct 
pp_power_state *ps; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 5d61cc9d45544..eb6609116a7af 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -186,10 +186,8 @@ extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr); -extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); -extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result); +extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr); +extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index e397349ce1c98..4527c07bc6794 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -114,8 +114,7 @@ int smum_early_init(struct pp_instance *handle) return 0; } -int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable) return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr); @@ -123,8 +122,7 @@ int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, return 0; } -int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table) return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); From 06d31a69ca4a069d4ad79be570eb0ccbd8d65db5 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 4 Sep 2017 17:51:28 +0800 Subject: [PATCH 147/232] drm/amd/powerplay: refine powerplay code for CZ/ST 1. add function points instand of creat function tables 2. 
implement stop dpm tasks for CZ/ST Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../amd/powerplay/hwmgr/cz_clockpowergating.c | 30 -- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 377 ++++++++---------- 2 files changed, 159 insertions(+), 248 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b33935fcf4283..5baf1a48294a5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -103,16 +103,6 @@ int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr) return 0; } -static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) -{ - return 0; -} - -static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result) -{ - return 0; -} - int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -221,23 +211,3 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) return 0; } - -static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = { - /*we don't need an exit table here, because there is only D3 cold on Kv*/ - { - .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating, - .tableFunction = cz_tf_uvd_power_gating_initialize - }, - { - .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating, - .tableFunction = cz_tf_vce_power_gating_initialize - }, - /* to do { NULL, cz_tf_xdma_power_gating_enable }, */ - { } -}; - -const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = { - 0, - PHM_MasterTableFlag_None, - cz_enable_clock_power_gatings_list -}; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 7f3b24f42e30a..7efe00881aa83 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -440,14 +440,7 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) return 0; } -static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) -{ - return 0; -} - -static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) { struct SMU8_Fusion_ClkTable *clock_table; int ret; @@ -566,8 +559,7 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input, return ret; } -static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_clock_voltage_dependency_table *table = @@ -593,8 +585,7 @@ static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_uvd_clock_voltage_dependency_table *table = @@ -621,8 +612,7 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, 
void *storage, int result) +static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_vce_clock_voltage_dependency_table *table = @@ -649,8 +639,7 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_init_acp_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_acp_clock_voltage_dependency_table *table = @@ -676,8 +665,7 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -686,22 +674,16 @@ static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, cz_hwmgr->samu_power_gated = false; cz_hwmgr->acp_power_gated = false; cz_hwmgr->pgacpinit = true; - - return 0; } -static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); cz_hwmgr->low_sclk_interrupt_threshold = 0; - - return 0; } -static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) + +static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); struct phm_clock_voltage_dependency_table *table = @@ -774,9 +756,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, return 0; } -static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { @@ -794,9 +774,7 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr, return 0; } -static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -808,63 +786,72 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, return 0; } -static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) { + struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + + if (hw_data->is_nb_dpm_enabled) { + if (enable) { + PP_DBG_LOG("enable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_EnableLowMemoryPstate, + (lock ? 1 : 0)); + } else { + PP_DBG_LOG("disable Low Memory PState.\n"); + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_DisableLowMemoryPstate, + (lock ? 
1 : 0)); + } + } + return 0; } - -static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) { int ret = 0; struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); unsigned long dpm_features = 0; - if (!cz_hwmgr->is_nb_dpm_enabled) { - PP_DBG_LOG("enabling ALL SMU features.\n"); + if (cz_hwmgr->is_nb_dpm_enabled) { + cz_nbdpm_pstate_enable_disable(hwmgr, true, true); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, + PPSMC_MSG_DisableAllSmuFeatures, dpm_features); if (ret == 0) - cz_hwmgr->is_nb_dpm_enabled = true; + cz_hwmgr->is_nb_dpm_enabled = false; } return ret; } -static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock) +static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) { - struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); - - if (hw_data->is_nb_dpm_enabled) { - if (enable) { - PP_DBG_LOG("enable Low Memory PState.\n"); + int ret = 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_EnableLowMemoryPstate, - (lock ? 1 : 0)); - } else { - PP_DBG_LOG("disable Low Memory PState.\n"); + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_DisableLowMemoryPstate, - (lock ? 1 : 0)); - } + if (!cz_hwmgr->is_nb_dpm_enabled) { + PP_DBG_LOG("enabling ALL SMU features.\n"); + dpm_features |= NB_DPM_MASK; + ret = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); + if (ret == 0) + cz_hwmgr->is_nb_dpm_enabled = true; } - return 0; + return ret; } -static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input) { bool disable_switch; bool enable_low_mem_state; @@ -886,64 +873,64 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item cz_set_power_state_list[] = { - { .tableFunction = cz_tf_update_sclk_limit }, - { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold }, - { .tableFunction = cz_tf_set_watermark_threshold }, - { .tableFunction = cz_tf_set_enabled_levels }, - { .tableFunction = cz_tf_enable_nb_dpm }, - { .tableFunction = cz_tf_update_low_mem_pstate }, - { } -}; +static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int ret = 0; -static const struct phm_master_table_header cz_set_power_state_master = { - 0, - PHM_MasterTableFlag_None, - cz_set_power_state_list -}; + cz_update_sclk_limit(hwmgr); + cz_set_deep_sleep_sclk_threshold(hwmgr); + cz_set_watermark_threshold(hwmgr); + ret = cz_enable_nb_dpm(hwmgr); + if (ret) + return ret; + cz_update_low_mem_pstate(hwmgr, input); -static const struct phm_master_table_item cz_setup_asic_list[] = { - { .tableFunction = cz_tf_reset_active_process_mask }, - { .tableFunction = cz_tf_upload_pptable_to_smu }, - { .tableFunction = cz_tf_init_sclk_limit }, - { .tableFunction = cz_tf_init_uvd_limit }, - { .tableFunction = cz_tf_init_vce_limit }, - { .tableFunction = cz_tf_init_acp_limit }, - { .tableFunction = cz_tf_init_power_gate_state }, - { .tableFunction = cz_tf_init_sclk_threshold }, - { } + return 0; }; -static const struct phm_master_table_header cz_setup_asic_master = { - 0, - 
PHM_MasterTableFlag_None, - cz_setup_asic_list -}; -static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int cz_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int ret; + + ret = cz_upload_pptable_to_smu(hwmgr); + if (ret) + return ret; + ret = cz_init_sclk_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_uvd_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_vce_limit(hwmgr); + if (ret) + return ret; + ret = cz_init_acp_limit(hwmgr); + if (ret) + return ret; + + cz_init_power_gate_state(hwmgr); + cz_init_sclk_threshold(hwmgr); + + return 0; +} + +static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); + hw_data->disp_clk_bypass_pending = false; hw_data->disp_clk_bypass = false; - - return 0; } -static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); - hw_data->is_nb_dpm_enabled = false; - return 0; + hw_data->is_nb_dpm_enabled = false; } -static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); @@ -951,50 +938,60 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, hw_data->cc6_settings.cpu_pstate_separation_time = 0; hw_data->cc6_settings.cpu_cc6_disable = false; hw_data->cc6_settings.cpu_pstate_disable = false; - - return 0; } -static const struct phm_master_table_item cz_power_down_asic_list[] = { - { .tableFunction = cz_tf_power_up_display_clock_sys_pll }, - { .tableFunction = cz_tf_clear_nb_dpm_flag }, - { .tableFunction = cz_tf_reset_cc6_data }, - { } -}; - -static const struct phm_master_table_header cz_power_down_asic_master = { - 0, - PHM_MasterTableFlag_None, - cz_power_down_asic_list +static int cz_power_off_asic(struct pp_hwmgr *hwmgr) +{ + cz_power_up_display_clock_sys_pll(hwmgr); + cz_clear_nb_dpm_flag(hwmgr); + cz_reset_cc6_data(hwmgr); + return 0; }; -static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_program_voting_clients(struct pp_hwmgr *hwmgr) { PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); - return 0; } -static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output, - void *storage, int result) +static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0); +} + +static int cz_start_dpm(struct pp_hwmgr *hwmgr) { - int res = 0xff; + int ret = 0; struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); unsigned long dpm_features = 0; cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; dpm_features |= SCLK_DPM_MASK; - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); - return res; + return ret; +} + +static int cz_stop_dpm(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + unsigned long dpm_features = 0; + + if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) { + dpm_features |= SCLK_DPM_MASK; + cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; + ret = 
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_DisableAllSmuFeatures, + dpm_features); + } + return ret; } -static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -1016,13 +1013,11 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, return 0; } -static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); cz_hwmgr->acp_boot_level = 0xff; - return 0; } static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, @@ -1038,60 +1033,45 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, return true; } - return result; + return false; } -static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr) { if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return PP_Result_TableImmediateExit; - return 0; + return true; + return false; } -static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { - /* TO DO */ - return 0; -} + if (!cz_check_for_dpm_enabled(hwmgr)) { + pr_info("dpm has been disabled\n"); + return 0; + } + cz_disable_nb_dpm(hwmgr); -static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) -{ - if (!cz_dpm_check_smu_features(hwmgr, - SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return PP_Result_TableImmediateExit; - return 0; -} + cz_clear_voting_clients(hwmgr); + if (cz_stop_dpm(hwmgr)) + return -EINVAL; -static const struct phm_master_table_item cz_disable_dpm_list[] = { - { .tableFunction = cz_tf_check_for_dpm_enabled }, - { }, + return 0; }; +static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + if (cz_check_for_dpm_enabled(hwmgr)) { + pr_info("dpm has been enabled\n"); + return 0; + } -static const struct phm_master_table_header cz_disable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - cz_disable_dpm_list -}; - -static const struct phm_master_table_item cz_enable_dpm_list[] = { - { .tableFunction = cz_tf_check_for_dpm_disabled }, - { .tableFunction = cz_tf_program_voting_clients }, - { .tableFunction = cz_tf_start_dpm }, - { .tableFunction = cz_tf_program_bootup_state }, - { .tableFunction = cz_tf_enable_didt }, - { .tableFunction = cz_tf_reset_acp_boot_level }, - { }, -}; + cz_program_voting_clients(hwmgr); + if (cz_start_dpm(hwmgr)) + return -EINVAL; + cz_program_bootup_state(hwmgr); + cz_reset_acp_boot_level(hwmgr); -static const struct phm_master_table_header cz_enable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - cz_enable_dpm_list + return 0; }; static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, @@ -1162,7 +1142,8 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return -ENOMEM; hwmgr->backend = data; - + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { pr_err("cz_initialize_dpm_defaults failed\n"); @@ -1177,58 +1158,14 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 
cz_construct_boot_state(hwmgr); - result = phm_construct_table(hwmgr, &cz_setup_asic_master, - &(hwmgr->setup_asic)); - if (result != 0) { - pr_err("Fail to construct setup ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &cz_power_down_asic_master, - &(hwmgr->power_down_asic)); - if (result != 0) { - pr_err("Fail to construct power down ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &cz_disable_dpm_master, - &(hwmgr->disable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to disable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &cz_enable_dpm_master, - &(hwmgr->enable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to enable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &cz_set_power_state_master, - &(hwmgr->set_power_state)); - if (result != 0) { - pr_err("Fail to construct set_power_state\n"); - return result; - } hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; - result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); - if (result != 0) { - pr_err("Fail to construct enable_clock_power_gatings\n"); - return result; - } return result; } static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { if (hwmgr != NULL) { - phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings)); - phm_destroy_table(hwmgr, &(hwmgr->set_power_state)); - phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); - phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; @@ -1938,7 +1875,6 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, - .asic_setup = NULL, .apply_state_adjust_rules = cz_apply_state_adjust_rules, .force_dpm_level = cz_dpm_force_dpm_level, .get_power_state_size = cz_get_power_state_size, @@ -1960,6 +1896,11 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .get_clock_by_type = cz_get_clock_by_type, .get_max_high_clocks = cz_get_max_high_clocks, .read_sensor = cz_read_sensor, + .power_off_asic = cz_power_off_asic, + .asic_setup = cz_setup_asic_task, + .dynamic_state_management_enable = cz_enable_dpm_tasks, + .power_state_set = cz_set_power_state_tasks, + .dynamic_state_management_disable = cz_disable_dpm_tasks, }; int cz_init_function_pointers(struct pp_hwmgr *hwmgr) From cf2623d951c1c52923a776e01cf2e2afc9d042a0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 4 Sep 2017 18:11:52 +0800 Subject: [PATCH 148/232] drm/amd/powerplay: refine powerplay code for RV use function points instand of function table. 
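For illustration, the pattern being replaced and its replacement look roughly like the sketch below (condensed from the rv_hwmgr.c hunks in this patch; error handling and unrelated callbacks omitted):

/* before: per-step task functions registered in a master table and
 * executed indirectly through phm_dispatch_table()
 */
static const struct phm_master_table_item rv_set_power_state_list[] = {
	{ NULL, rv_tf_set_clock_limit },
	{ }
};

/* after: one plain helper per task, wired directly into the
 * per-ASIC pp_hwmgr_func callback table
 */
static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return rv_set_clock_limit(hwmgr, input);
}

static const struct pp_hwmgr_func rv_hwmgr_funcs = {
	/* ... */
	.power_state_set = rv_set_power_state_tasks,
};

The generic phm entry points then call the callback directly instead of walking a runtime table, which is what allows the functiontables.c machinery to be deleted in the follow-up patch.
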
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 137 +++++------------- 1 file changed, 33 insertions(+), 104 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index d5a9c0792de00..da74f95fb86b9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -157,8 +157,7 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr) return 0; } -static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); struct PP_Clocks clocks = {0}; @@ -234,19 +233,12 @@ static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) return 0; } -static const struct phm_master_table_item rv_set_power_state_list[] = { - { NULL, rv_tf_set_clock_limit }, - { } -}; - -static const struct phm_master_table_header rv_set_power_state_master = { - 0, - PHM_MasterTableFlag_None, - rv_set_power_state_list -}; +static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + return rv_set_clock_limit(hwmgr, input); +} -static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result) +static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -257,20 +249,13 @@ static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input, return 0; } -static const struct phm_master_table_item rv_setup_asic_list[] = { - { .tableFunction = rv_tf_init_power_gate_state }, - { } -}; -static const struct phm_master_table_header rv_setup_asic_master = { - 0, - PHM_MasterTableFlag_None, - rv_setup_asic_list -}; +static int rv_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + return rv_init_power_gate_state(hwmgr); +} -static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -282,21 +267,12 @@ static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item rv_power_down_asic_list[] = { - { .tableFunction = rv_tf_reset_cc6_data }, - { } -}; - -static const struct phm_master_table_header rv_power_down_asic_master = { - 0, - PHM_MasterTableFlag_None, - rv_power_down_asic_list -}; - +static int rv_power_off_asic(struct pp_hwmgr *hwmgr) +{ + return rv_reset_cc6_data(hwmgr); +} -static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -307,21 +283,12 @@ static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item rv_disable_dpm_list[] = { - {NULL, rv_tf_disable_gfx_off}, - { }, -}; - - -static const struct phm_master_table_header rv_disable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - rv_disable_dpm_list -}; +static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + return rv_disable_gfx_off(hwmgr); +} -static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr, - void *input, void *output, - void *storage, int result) +static int 
rv_enable_gfx_off(struct pp_hwmgr *hwmgr) { struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); @@ -332,16 +299,10 @@ static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr, return 0; } -static const struct phm_master_table_item rv_enable_dpm_list[] = { - {NULL, rv_tf_enable_gfx_off}, - { }, -}; - -static const struct phm_master_table_header rv_enable_dpm_master = { - 0, - PHM_MasterTableFlag_None, - rv_enable_dpm_list -}; +static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + return rv_enable_gfx_off(hwmgr); +} static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, @@ -474,6 +435,9 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + result = rv_initialize_dpm_defaults(hwmgr); if (result != 0) { pr_err("rv_initialize_dpm_defaults failed\n"); @@ -490,40 +454,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) rv_construct_boot_state(hwmgr); - result = phm_construct_table(hwmgr, &rv_setup_asic_master, - &(hwmgr->setup_asic)); - if (result != 0) { - pr_err("Fail to construct setup ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_power_down_asic_master, - &(hwmgr->power_down_asic)); - if (result != 0) { - pr_err("Fail to construct power down ASIC\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_set_power_state_master, - &(hwmgr->set_power_state)); - if (result != 0) { - pr_err("Fail to construct set_power_state\n"); - return result; - } - - result = phm_construct_table(hwmgr, &rv_disable_dpm_master, - &(hwmgr->disable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to disable_dynamic_state\n"); - return result; - } - result = phm_construct_table(hwmgr, &rv_enable_dpm_master, - &(hwmgr->enable_dynamic_state_management)); - if (result != 0) { - pr_err("Fail to enable_dynamic_state\n"); - return result; - } - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = RAVEN_MAX_HARDWARE_POWERLEVELS; @@ -546,12 +476,6 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); - phm_destroy_table(hwmgr, &(hwmgr->set_power_state)); - phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); - phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); - phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); - kfree(pinfo->vdd_dep_on_dcefclk); pinfo->vdd_dep_on_dcefclk = NULL; kfree(pinfo->vdd_dep_on_socclk); @@ -946,6 +870,11 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = { .read_sensor = rv_read_sensor, .set_active_display_count = rv_set_active_display_count, .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk, + .dynamic_state_management_enable = rv_enable_dpm_tasks, + .power_off_asic = rv_power_off_asic, + .asic_setup = rv_setup_asic_task, + .power_state_set = rv_set_power_state_tasks, + .dynamic_state_management_disable = rv_disable_dpm_tasks, }; int rv_init_function_pointers(struct pp_hwmgr *hwmgr) From 698f88e697cc8852558d120fdecfdb38c18c2ff7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 4 Sep 2017 18:22:02 +0800 Subject: [PATCH 149/232] drm/amd/powerplay: delete dead code in powerplay delete functiontable related codes Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- 
drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 2 +- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 3 +- .../drm/amd/powerplay/hwmgr/functiontables.c | 161 ------------------ .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 93 +++------- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 12 -- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 3 - .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 - .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 - drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 49 ------ 9 files changed, 22 insertions(+), 310 deletions(-) delete mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 79119d6cd07f2..dc4bbcfe12439 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -2,7 +2,7 @@ # Makefile for the 'hw manager' sub-component of powerplay. # It provides the hardware management services for the driver. -HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ +HARDWARE_MGR = hwmgr.o processpptables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ cz_clockpowergating.o pppcielanes.o\ process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 7efe00881aa83..d3b46462072d1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1142,8 +1142,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return -ENOMEM; hwmgr->backend = data; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); + result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { pr_err("cz_initialize_dpm_defaults failed\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c deleted file mode 100644 index bc7d8bd7e7cbe..0000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include -#include -#include -#include "hwmgr.h" - -static int phm_run_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, - void *output, - void *temp_storage) -{ - int result = 0; - phm_table_function *function; - - if (rt_table->function_list == NULL) { - pr_debug("this function not implement!\n"); - return 0; - } - - for (function = rt_table->function_list; NULL != *function; function++) { - int tmp = (*function)(hwmgr, input, output, temp_storage, result); - - if (tmp == PP_Result_TableImmediateExit) - break; - if (tmp) { - if (0 == result) - result = tmp; - if (rt_table->exit_error) - break; - } - } - - return result; -} - -int phm_dispatch_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, void *output) -{ - int result; - void *temp_storage; - - if (hwmgr == NULL || rt_table == NULL) { - pr_err("Invalid Parameter!\n"); - return -EINVAL; - } - - if (0 != rt_table->storage_size) { - temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL); - if (temp_storage == NULL) { - pr_err("Could not allocate table temporary storage\n"); - return -ENOMEM; - } - } else { - temp_storage = NULL; - } - - result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); - - kfree(temp_storage); - - return result; -} - -int phm_construct_table(struct pp_hwmgr *hwmgr, - const struct phm_master_table_header *master_table, - struct phm_runtime_table_header *rt_table) -{ - uint32_t function_count = 0; - const struct phm_master_table_item *table_item; - uint32_t size; - phm_table_function *run_time_list; - phm_table_function *rtf; - - if (hwmgr == NULL || master_table == NULL || rt_table == NULL) { - pr_err("Invalid Parameter!\n"); - return -EINVAL; - } - - for (table_item = master_table->master_list; - NULL != table_item->tableFunction; table_item++) { - if ((NULL == table_item->isFunctionNeededInRuntimeTable) || - (table_item->isFunctionNeededInRuntimeTable(hwmgr))) - function_count++; - } - - size = (function_count + 1) * sizeof(phm_table_function); - run_time_list = kzalloc(size, GFP_KERNEL); - - if (NULL == run_time_list) - return -ENOMEM; - - rtf = run_time_list; - for (table_item = master_table->master_list; - NULL != table_item->tableFunction; table_item++) { - if ((rtf - run_time_list) > function_count) { - pr_err("Check function results have changed\n"); - kfree(run_time_list); - return -EINVAL; - } - - if ((NULL == table_item->isFunctionNeededInRuntimeTable) || - (table_item->isFunctionNeededInRuntimeTable(hwmgr))) { - *(rtf++) = table_item->tableFunction; - } - } - - if ((rtf - run_time_list) > function_count) { - pr_err("Check function results have changed\n"); - kfree(run_time_list); - return -EINVAL; - } - - *rtf = NULL; - rt_table->function_list = run_time_list; - rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError)); - rt_table->storage_size = master_table->storage_size; - return 0; -} - -int phm_destroy_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table) -{ - if (hwmgr == NULL || rt_table == NULL) { - pr_err("Invalid Parameter\n"); - return -EINVAL; - } - - if (NULL == rt_table->function_list) - return 0; - - kfree(rt_table->function_list); - - rt_table->function_list = NULL; - rt_table->storage_size = 0; - rt_table->exit_error = false; - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index a3991c0dff2ec..fa4fbc25ebe1b 100644 --- 
a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -36,29 +36,12 @@ return -EINVAL; \ } while (0) -bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr) -{ - return hwmgr->block_hw_access; -} - -int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block) -{ - hwmgr->block_hw_access = block; - return 0; -} - int phm_setup_asic(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->asic_setup) - return hwmgr->hwmgr_func->asic_setup(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->asic_setup) + return hwmgr->hwmgr_func->asic_setup(hwmgr); return 0; } @@ -67,14 +50,8 @@ int phm_power_down_asic(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->power_off_asic) - return hwmgr->hwmgr_func->power_off_asic(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->power_off_asic) + return hwmgr->hwmgr_func->power_off_asic(hwmgr); return 0; } @@ -90,13 +67,8 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr, states.pcurrent_state = pcurrent_state; states.pnew_state = pnew_power_state; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->power_state_set) - return hwmgr->hwmgr_func->power_state_set(hwmgr, &states); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL); - } + if (NULL != hwmgr->hwmgr_func->power_state_set) + return hwmgr->hwmgr_func->power_state_set(hwmgr, &states); return 0; } @@ -107,15 +79,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) bool enabled; PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) - ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); - } else { - ret = phm_dispatch_table(hwmgr, - &(hwmgr->enable_dynamic_state_management), - NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) + ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); enabled = ret == 0; @@ -131,15 +96,8 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (hwmgr->hwmgr_func->dynamic_state_management_disable) - ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); - } else { - ret = phm_dispatch_table(hwmgr, - &(hwmgr->disable_dynamic_state_management), - NULL, NULL); - } + if (hwmgr->hwmgr_func->dynamic_state_management_disable) + ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); enabled = ret == 0 ? 
false : true; @@ -219,13 +177,9 @@ int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) - return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); - } else { - return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL); - } + if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) + return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); + return 0; } @@ -233,11 +187,9 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating) - return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr); - } + if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating) + return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr); + return 0; } @@ -246,12 +198,9 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) { - if (NULL != hwmgr->hwmgr_func->display_config_changed) - hwmgr->hwmgr_func->display_config_changed(hwmgr); - } else - return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL); + if (NULL != hwmgr->hwmgr_func->display_config_changed) + hwmgr->hwmgr_func->display_config_changed(hwmgr); + return 0; } @@ -259,9 +208,7 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface)) - if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment) + if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment) hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 387d0b62100ca..e3bf69c924fd7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -852,10 +852,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - if (hwmgr->chip_id != CHIP_POLARIS10) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); @@ -882,9 +878,6 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - return 0; } @@ -904,9 +897,6 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - return 0; } @@ -920,8 +910,6 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TDRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - 
phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index da74f95fb86b9..594b978f46d97 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -435,9 +435,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - result = rv_initialize_dpm_defaults(hwmgr); if (result != 0) { pr_err("rv_initialize_dpm_defaults failed\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c631b1926be76..8b8e202b45b42 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3854,9 +3854,6 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); } @@ -3959,9 +3956,6 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 0eb3c2907de95..ad34178b4ae3f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -200,9 +200,6 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDCI); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableSMU7ThermalManagement); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 831add4486144..2a10d81741265 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -109,10 +109,6 @@ enum PHM_BackEnd_Magic { #define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2 #define PHM_PCIE_POWERGATING_TARGET_PHY 3 -typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input, - void *output, void *storage, int result); - -typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr); struct phm_set_power_state_input { const struct pp_hw_power_state *pcurrent_state; @@ -149,30 +145,6 @@ struct phm_gfx_arbiter { uint32_t fclk; }; -/* Entries in the master tables */ -struct phm_master_table_item { - phm_check_function isFunctionNeededInRuntimeTable; - phm_table_function tableFunction; -}; - -enum phm_master_table_flag { - PHM_MasterTableFlag_None = 0, - PHM_MasterTableFlag_ExitOnError = 1, -}; - -/* The header of the master tables */ -struct phm_master_table_header { - uint32_t storage_size; - uint32_t flags; - const struct phm_master_table_item *master_list; -}; - -struct phm_runtime_table_header { - uint32_t 
storage_size; - bool exit_error; - phm_table_function *function_list; -}; - struct phm_clock_array { uint32_t count; uint32_t values[1]; @@ -216,19 +188,6 @@ struct phm_phase_shedding_limits_record { uint32_t Mclk; }; - -extern int phm_dispatch_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table, - void *input, void *output); - -extern int phm_construct_table(struct pp_hwmgr *hwmgr, - const struct phm_master_table_header *master_table, - struct phm_runtime_table_header *rt_table); - -extern int phm_destroy_table(struct pp_hwmgr *hwmgr, - struct phm_runtime_table_header *rt_table); - - struct phm_uvd_clock_voltage_dependency_record { uint32_t vclk; uint32_t dclk; @@ -749,7 +708,6 @@ struct pp_hwmgr { enum amd_dpm_forced_level dpm_level; enum amd_dpm_forced_level saved_dpm_level; enum amd_dpm_forced_level request_dpm_level; - bool block_hw_access; struct phm_gfx_arbiter gfx_arbiter; struct phm_acp_arbiter acp_arbiter; struct phm_uvd_arbiter uvd_arbiter; @@ -760,13 +718,6 @@ struct pp_hwmgr { void *backend; enum PP_DAL_POWERLEVEL dal_power_level; struct phm_dynamic_state_info dyn_state; - struct phm_runtime_table_header setup_asic; - struct phm_runtime_table_header power_down_asic; - struct phm_runtime_table_header disable_dynamic_state_management; - struct phm_runtime_table_header enable_dynamic_state_management; - struct phm_runtime_table_header set_power_state; - struct phm_runtime_table_header enable_clock_power_gatings; - struct phm_runtime_table_header display_configuration_changed; const struct pp_hwmgr_func *hwmgr_func; const struct pp_table_func *pptable_func; struct pp_power_state *ps; From cfa289fd4986c504b0396cce167802dfcf4943d0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 6 Sep 2017 15:27:59 +0800 Subject: [PATCH 150/232] drm/amdgpu: rename amdgpu_dpm_funcs to amd_pm_funcs renamed amdgpu_dpm_funcs and moved to amd_shared.h so can shared with powerplay. 
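To keep the shared callback structure independent of amdgpu-internal types, the dpm callbacks now take an opaque handle and cast it back inside the implementation. A representative before/after sketch, condensed from the ci_dpm.c hunk below (body trimmed):

/* before: callback signature tied to struct amdgpu_device */
static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);

/* after: opaque handle, converted back in the implementation */
static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;
	/* ... */
}
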
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 47 +------- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 100 ++++++++++++------ drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 53 ++++++---- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 69 +++++++----- drivers/gpu/drm/amd/include/amd_shared.h | 51 +++++++++ .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 6 -- 7 files changed, 201 insertions(+), 129 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 1cb52fd190603..e997ebbe43ea0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes) } struct amd_vce_state* -amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx) +amdgpu_get_vce_clock_state(void *handle, u32 idx) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (idx < adev->pm.dpm.num_of_vce_states) return &adev->pm.dpm.vce_states[idx]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 3eba4137508be..2f2bdb032d30b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -241,49 +241,6 @@ enum amdgpu_pcie_gen { AMDGPU_PCIE_GEN_INVALID = 0xffff }; -struct amdgpu_dpm_funcs { - int (*get_temperature)(struct amdgpu_device *adev); - int (*pre_set_power_state)(struct amdgpu_device *adev); - int (*set_power_state)(struct amdgpu_device *adev); - void (*post_set_power_state)(struct amdgpu_device *adev); - void (*display_configuration_changed)(struct amdgpu_device *adev); - u32 (*get_sclk)(struct amdgpu_device *adev, bool low); - u32 (*get_mclk)(struct amdgpu_device *adev, bool low); - void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); - void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); - int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level); - bool (*vblank_too_short)(struct amdgpu_device *adev); - void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); - void (*powergate_vce)(struct amdgpu_device *adev, bool gate); - void (*enable_bapm)(struct amdgpu_device *adev, bool enable); - void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); - u32 (*get_fan_control_mode)(struct amdgpu_device *adev); - int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); - int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); - int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); - int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); - int (*get_sclk_od)(struct amdgpu_device *adev); - int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); - int (*get_mclk_od)(struct amdgpu_device *adev); - int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); - int (*check_state_equal)(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, - bool *equal); - int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value, - int *size); - - struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx); - int (*reset_power_profile_state)(struct amdgpu_device *adev, - struct amd_pp_profile *request); - int (*get_power_profile_state)(struct amdgpu_device *adev, - struct amd_pp_profile *query); - int 
(*set_power_profile_state)(struct amdgpu_device *adev, - struct amd_pp_profile *request); - int (*switch_power_profile)(struct amdgpu_device *adev, - enum amd_pp_profile_type type); -}; - #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) @@ -485,7 +442,7 @@ struct amdgpu_pm { struct amdgpu_dpm dpm; const struct firmware *fw; /* SMC firmware */ uint32_t fw_version; - const struct amdgpu_dpm_funcs *funcs; + const struct amd_pm_funcs *funcs; uint32_t pcie_gen_mask; uint32_t pcie_mlw_mask; struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ @@ -551,6 +508,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, u8 amdgpu_encode_pci_lane_width(u32 lanes); struct amd_vce_state* -amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx); +amdgpu_get_vce_clock_state(void *handle, u32 idx); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index cb508a211b2f6..bdf792822ff5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -883,8 +883,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev) return ret; } -static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) +static void ci_dpm_powergate_uvd(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); pi->uvd_power_gated = gate; @@ -901,8 +902,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) } } -static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) +static bool ci_dpm_vblank_too_short(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 300; @@ -1210,11 +1212,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) } } -static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev, +static int ci_dpm_get_fan_speed_percent(void *handle, u32 *speed) { u32 duty, duty100; u64 tmp64; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.no_fan) return -ENOENT; @@ -1237,12 +1240,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev, return 0; } -static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev, +static int ci_dpm_set_fan_speed_percent(void *handle, u32 speed) { u32 tmp; u32 duty, duty100; u64 tmp64; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); if (adev->pm.no_fan) @@ -1271,8 +1275,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev, return 0; } -static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) +static void ci_dpm_set_fan_control_mode(void *handle, u32 mode) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + switch (mode) { case AMD_FAN_CTRL_NONE: if (adev->pm.dpm.fan.ucode_fan_control) @@ -1292,8 +1298,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) } } -static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev) +static u32 ci_dpm_get_fan_control_mode(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); if (pi->fan_is_controlled_by_smc) @@ -4378,9 +4385,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev, } -static int ci_dpm_force_performance_level(struct amdgpu_device *adev, +static int ci_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); u32 tmp, levels, i; int ret; @@ -5291,8 +5299,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev, adev->pm.dpm.requested_ps = &pi->requested_rps; } -static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) +static int ci_dpm_pre_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; struct amdgpu_ps *new_ps = &requested_ps; @@ -5304,8 +5313,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) return 0; } -static void ci_dpm_post_set_power_state(struct amdgpu_device *adev) +static void ci_dpm_post_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct amdgpu_ps *new_ps = &pi->requested_rps; @@ -5479,8 +5489,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev) ci_update_current_ps(adev, boot_ps); } -static int ci_dpm_set_power_state(struct amdgpu_device *adev) +static int ci_dpm_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct amdgpu_ps *new_ps = &pi->requested_rps; struct amdgpu_ps *old_ps = &pi->current_rps; @@ -5551,8 +5562,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev) } #endif -static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev) +static void ci_dpm_display_configuration_changed(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + ci_program_display_gap(adev); } @@ 
-6105,9 +6118,10 @@ static int ci_dpm_init(struct amdgpu_device *adev) } static void -ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, +ci_dpm_debugfs_print_current_performance_level(void *handle, struct seq_file *m) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct amdgpu_ps *rps = &pi->current_rps; u32 sclk = ci_get_average_sclk_freq(adev); @@ -6131,12 +6145,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, seq_printf(m, "GPU load: %u %%\n", activity_percent); } -static void ci_dpm_print_power_state(struct amdgpu_device *adev, - struct amdgpu_ps *rps) +static void ci_dpm_print_power_state(void *handle, void *current_ps) { + struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; struct ci_ps *ps = ci_get_ps(rps); struct ci_pl *pl; int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_dpm_print_class_info(rps->class, rps->class2); amdgpu_dpm_print_cap_info(rps->caps); @@ -6158,20 +6173,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1, (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane)); } -static int ci_check_state_equal(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, +static int ci_check_state_equal(void *handle, + void *current_ps, + void *request_ps, bool *equal) { struct ci_ps *ci_cps; struct ci_ps *ci_rps; int i; + struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) return -EINVAL; - ci_cps = ci_get_ps(cps); - ci_rps = ci_get_ps(rps); + ci_cps = ci_get_ps((struct amdgpu_ps *)cps); + ci_rps = ci_get_ps((struct amdgpu_ps *)rps); if (ci_cps == NULL) { *equal = false; @@ -6199,8 +6217,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev, return 0; } -static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) +static u32 ci_dpm_get_sclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); @@ -6210,8 +6229,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; } -static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low) +static u32 ci_dpm_get_mclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); @@ -6222,10 +6242,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low) } /* get temperature in millidegrees */ -static int ci_dpm_get_temp(struct amdgpu_device *adev) +static int ci_dpm_get_temp(void *handle) { u32 temp; int actual_temp = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; @@ -6551,9 +6572,10 @@ static int ci_dpm_set_powergating_state(void *handle, return 0; } -static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, +static int ci_dpm_print_clock_levels(void *handle, enum pp_clock_type type, char *buf) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct 
ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; @@ -6618,9 +6640,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, return size; } -static int ci_dpm_force_clock_level(struct amdgpu_device *adev, +static int ci_dpm_force_clock_level(void *handle, enum pp_clock_type type, uint32_t mask) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO | @@ -6664,8 +6687,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev, return 0; } -static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) +static int ci_dpm_get_sclk_od(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); struct ci_single_dpm_table *golden_sclk_table = @@ -6680,8 +6704,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) return value; } -static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +static int ci_dpm_set_sclk_od(void *handle, uint32_t value) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); struct ci_single_dpm_table *golden_sclk_table = @@ -6698,8 +6723,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) return 0; } -static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) +static int ci_dpm_get_mclk_od(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); struct ci_single_dpm_table *golden_mclk_table = @@ -6714,8 +6740,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) return value; } -static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +static int ci_dpm_set_mclk_od(void *handle, uint32_t value) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); struct ci_single_dpm_table *golden_mclk_table = @@ -6732,9 +6759,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) return 0; } -static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev, +static int ci_dpm_get_power_profile_state(void *handle, struct amd_pp_profile *query) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); if (!pi || !query) @@ -6851,9 +6879,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev, return result; } -static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev, +static int ci_dpm_set_power_profile_state(void *handle, struct amd_pp_profile *request) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); int ret = -1; @@ -6906,9 +6935,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev, return 0; } -static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev, +static int ci_dpm_reset_power_profile_state(void *handle, struct amd_pp_profile *request) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); if (!pi || !request) @@ -6927,9 +6957,10 @@ static int 
ci_dpm_reset_power_profile_state(struct amdgpu_device *adev, return -EINVAL; } -static int ci_dpm_switch_power_profile(struct amdgpu_device *adev, +static int ci_dpm_switch_power_profile(void *handle, enum amd_pp_profile_type type) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct ci_power_info *pi = ci_get_pi(adev); struct amd_pp_profile request = {0}; @@ -6944,11 +6975,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev, return 0; } -static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx, +static int ci_dpm_read_sensor(void *handle, int idx, void *value, int *size) { u32 activity_percent = 50; int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* size must be at least 4 bytes for all sensors */ if (*size < 4) @@ -7003,7 +7035,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = { .set_powergating_state = ci_dpm_set_powergating_state, }; -static const struct amdgpu_dpm_funcs ci_dpm_funcs = { +static const struct amd_pm_funcs ci_dpm_funcs = { .get_temperature = &ci_dpm_get_temp, .pre_set_power_state = &ci_dpm_pre_set_power_state, .set_power_state = &ci_dpm_set_power_state, diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3bbf2ccfca89c..f68d7abe1ed9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -64,7 +64,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp); static int kv_init_fps_limits(struct amdgpu_device *adev); -static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); +static void kv_dpm_powergate_uvd(void *handle, bool gate); static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1245,8 +1245,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev, adev->pm.dpm.requested_ps = &pi->requested_rps; } -static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) +static void kv_dpm_enable_bapm(void *handle, bool enable) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); int ret; @@ -1672,8 +1673,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) return kv_enable_acp_dpm(adev, !gate); } -static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) +static void kv_dpm_powergate_uvd(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); int ret; @@ -1868,10 +1870,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev, return ret; } -static int kv_dpm_force_performance_level(struct amdgpu_device *adev, +static int kv_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (level == AMD_DPM_FORCED_LEVEL_HIGH) { ret = kv_force_dpm_highest(adev); @@ -1892,8 +1895,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev, return 0; } -static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) +static int kv_dpm_pre_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; struct amdgpu_ps *new_ps = &requested_ps; @@ -1907,8 +1911,9 @@ static int 
kv_dpm_pre_set_power_state(struct amdgpu_device *adev) return 0; } -static int kv_dpm_set_power_state(struct amdgpu_device *adev) +static int kv_dpm_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); struct amdgpu_ps *new_ps = &pi->requested_rps; struct amdgpu_ps *old_ps = &pi->current_rps; @@ -1981,8 +1986,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev) return 0; } -static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) +static void kv_dpm_post_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); struct amdgpu_ps *new_ps = &pi->requested_rps; @@ -2848,9 +2854,10 @@ static int kv_dpm_init(struct amdgpu_device *adev) } static void -kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, +kv_dpm_debugfs_print_current_performance_level(void *handle, struct seq_file *m) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); u32 current_index = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & @@ -2875,11 +2882,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, } static void -kv_dpm_print_power_state(struct amdgpu_device *adev, - struct amdgpu_ps *rps) +kv_dpm_print_power_state(void *handle, void *request_ps) { int i; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; struct kv_ps *ps = kv_get_ps(rps); + struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_dpm_print_class_info(rps->class, rps->class2); amdgpu_dpm_print_cap_info(rps->caps); @@ -2905,13 +2913,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev) amdgpu_free_extended_power_table(adev); } -static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) +static void kv_dpm_display_configuration_changed(void *handle) { } -static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) +static u32 kv_dpm_get_sclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); @@ -2921,18 +2930,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) return requested_state->levels[requested_state->num_levels - 1].sclk; } -static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) +static u32 kv_dpm_get_mclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); return pi->sys_info.bootup_uma_clk; } /* get temperature in millidegrees */ -static int kv_dpm_get_temp(struct amdgpu_device *adev) +static int kv_dpm_get_temp(void *handle) { u32 temp; int actual_temp = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; temp = RREG32_SMC(0xC0300E0C); @@ -3222,14 +3233,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); } -static int kv_check_state_equal(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, +static int kv_check_state_equal(void *handle, + void *current_ps, + void *request_ps, bool *equal) { struct kv_ps *kv_cps; struct kv_ps *kv_rps; int i; + struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev == NULL || cps 
== NULL || rps == NULL || equal == NULL) return -EINVAL; @@ -3262,9 +3276,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev, return 0; } -static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx, +static int kv_dpm_read_sensor(void *handle, int idx, void *value, int *size) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); uint32_t sclk; u32 pl_index = @@ -3312,7 +3327,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = { .set_powergating_state = kv_dpm_set_powergating_state, }; -static const struct amdgpu_dpm_funcs kv_dpm_funcs = { +static const struct amd_pm_funcs kv_dpm_funcs = { .get_temperature = &kv_dpm_get_temp, .pre_set_power_state = &kv_dpm_pre_set_power_state, .set_power_state = &kv_dpm_set_power_state, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index d63873f3f5743..05b3dbf585d59 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3060,9 +3060,9 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev, return ret; } -static bool si_dpm_vblank_too_short(struct amdgpu_device *adev) +static bool si_dpm_vblank_too_short(void *handle) { - + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); /* we never hit the non-gddr5 limit so disable it */ u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; @@ -3871,9 +3871,10 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad 0 : -EINVAL; } -static int si_dpm_force_performance_level(struct amdgpu_device *adev, +static int si_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ps *rps = adev->pm.dpm.current_ps; struct si_ps *ps = si_get_ps(rps); u32 levels = ps->performance_level_count; @@ -6575,11 +6576,12 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) } } -static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev, +static int si_dpm_get_fan_speed_percent(void *handle, u32 *speed) { u32 duty, duty100; u64 tmp64; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.no_fan) return -ENOENT; @@ -6600,9 +6602,10 @@ static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev, return 0; } -static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev, +static int si_dpm_set_fan_speed_percent(void *handle, u32 speed) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; u32 duty, duty100; @@ -6633,8 +6636,10 @@ static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev, return 0; } -static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) +static void si_dpm_set_fan_control_mode(void *handle, u32 mode) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (mode) { /* stop auto-manage */ if (adev->pm.dpm.fan.ucode_fan_control) @@ -6649,8 +6654,9 @@ static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) } } -static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev) +static u32 si_dpm_get_fan_control_mode(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; @@ -6946,8 +6952,9 @@ static void si_dpm_disable(struct amdgpu_device *adev) ni_update_current_ps(adev, boot_ps); } -static 
int si_dpm_pre_set_power_state(struct amdgpu_device *adev) +static int si_dpm_pre_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; struct amdgpu_ps *new_ps = &requested_ps; @@ -6984,8 +6991,9 @@ static int si_power_control_set_level(struct amdgpu_device *adev) return 0; } -static int si_dpm_set_power_state(struct amdgpu_device *adev) +static int si_dpm_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *new_ps = &eg_pi->requested_rps; struct amdgpu_ps *old_ps = &eg_pi->current_rps; @@ -7086,8 +7094,9 @@ static int si_dpm_set_power_state(struct amdgpu_device *adev) return 0; } -static void si_dpm_post_set_power_state(struct amdgpu_device *adev) +static void si_dpm_post_set_power_state(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *new_ps = &eg_pi->requested_rps; @@ -7103,8 +7112,10 @@ void si_dpm_reset_asic(struct amdgpu_device *adev) } #endif -static void si_dpm_display_configuration_changed(struct amdgpu_device *adev) +static void si_dpm_display_configuration_changed(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + si_program_display_gap(adev); } @@ -7486,9 +7497,10 @@ static void si_dpm_fini(struct amdgpu_device *adev) amdgpu_free_extended_power_table(adev); } -static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, +static void si_dpm_debugfs_print_current_performance_level(void *handle, struct seq_file *m) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *rps = &eg_pi->current_rps; struct si_ps *ps = si_get_ps(rps); @@ -7860,10 +7872,11 @@ static int si_dpm_set_powergating_state(void *handle, } /* get temperature in millidegrees */ -static int si_dpm_get_temp(struct amdgpu_device *adev) +static int si_dpm_get_temp(void *handle) { u32 temp; int actual_temp = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> CTF_TEMP_SHIFT; @@ -7878,8 +7891,9 @@ static int si_dpm_get_temp(struct amdgpu_device *adev) return actual_temp; } -static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) +static u32 si_dpm_get_sclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); @@ -7889,8 +7903,9 @@ static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; } -static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) +static u32 si_dpm_get_mclk(void *handle, bool low) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); @@ -7900,9 +7915,11 @@ static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; } -static void si_dpm_print_power_state(struct amdgpu_device *adev, - struct 
amdgpu_ps *rps) +static void si_dpm_print_power_state(void *handle, + void *current_ps) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; struct si_ps *ps = si_get_ps(rps); struct rv7xx_pl *pl; int i; @@ -7942,20 +7959,23 @@ static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, (si_cpl1->vddci == si_cpl2->vddci)); } -static int si_check_state_equal(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, +static int si_check_state_equal(void *handle, + void *current_ps, + void *request_ps, bool *equal) { struct si_ps *si_cps; struct si_ps *si_rps; int i; + struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) return -EINVAL; - si_cps = si_get_ps(cps); - si_rps = si_get_ps(rps); + si_cps = si_get_ps((struct amdgpu_ps *)cps); + si_rps = si_get_ps((struct amdgpu_ps *)rps); if (si_cps == NULL) { printk("si_cps is NULL\n"); @@ -7983,9 +8003,10 @@ static int si_check_state_equal(struct amdgpu_device *adev, return 0; } -static int si_dpm_read_sensor(struct amdgpu_device *adev, int idx, +static int si_dpm_read_sensor(void *handle, int idx, void *value, int *size) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct amdgpu_ps *rps = &eg_pi->current_rps; struct si_ps *ps = si_get_ps(rps); @@ -8041,7 +8062,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = { .set_powergating_state = si_dpm_set_powergating_state, }; -static const struct amdgpu_dpm_funcs si_dpm_funcs = { +static const struct amd_pm_funcs si_dpm_funcs = { .get_temperature = &si_dpm_get_temp, .pre_set_power_state = &si_dpm_pre_set_power_state, .set_power_state = &si_dpm_set_power_state, diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 70e8c20acb2fd..140ff64383903 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -25,6 +25,8 @@ #define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */ +struct seq_file; + /* * Supported ASIC types */ @@ -144,6 +146,12 @@ enum amd_fan_ctrl_mode { AMD_FAN_CTRL_AUTO = 2, }; +enum pp_clock_type { + PP_SCLK, + PP_MCLK, + PP_PCIE, +}; + /* CG flags */ #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) @@ -249,4 +257,47 @@ struct amd_ip_funcs { void (*get_clockgating_state)(void *handle, u32 *flags); }; +struct amd_pm_funcs { + int (*get_temperature)(void *handle); + int (*pre_set_power_state)(void *handle); + int (*set_power_state)(void *handle); + void (*post_set_power_state)(void *handle); + void (*display_configuration_changed)(void *handle); + u32 (*get_sclk)(void *handle, bool low); + u32 (*get_mclk)(void *handle, bool low); + void (*print_power_state)(void *handle, void *ps); + void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m); + int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level); + bool (*vblank_too_short)(void *handle); + void (*powergate_uvd)(void *handle, bool gate); + void (*powergate_vce)(void *handle, bool gate); + void (*enable_bapm)(void *handle, bool enable); + void (*set_fan_control_mode)(void *handle, u32 mode); + u32 (*get_fan_control_mode)(void *handle); + int (*set_fan_speed_percent)(void *handle, u32 speed); + int 
(*get_fan_speed_percent)(void *handle, u32 *speed); + int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); + int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(void *handle); + int (*set_sclk_od)(void *handle, uint32_t value); + int (*get_mclk_od)(void *handle); + int (*set_mclk_od)(void *handle, uint32_t value); + int (*check_state_equal)(void *handle, + void *cps, + void *rps, + bool *equal); + int (*read_sensor)(void *handle, int idx, void *value, + int *size); + + struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx); + int (*reset_power_profile_state)(void *handle, + struct amd_pp_profile *request); + int (*get_power_profile_state)(void *handle, + struct amd_pp_profile *query); + int (*set_power_profile_state)(void *handle, + struct amd_pp_profile *request); + int (*switch_power_profile)(void *handle, + enum amd_pp_profile_type type); +}; + #endif /* __AMD_SHARED_H__ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index f471b99f456bd..832340b8d1afb 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -213,12 +213,6 @@ enum { PP_GROUP_MAX }; -enum pp_clock_type { - PP_SCLK, - PP_MCLK, - PP_PCIE, -}; - struct pp_states_info { uint32_t nums; uint32_t states[16]; From f93f0c3a7e8635a507b3f084f5f8b48441c79c9d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 6 Sep 2017 16:08:03 +0800 Subject: [PATCH 151/232] drm/amd/powerplay: use struct amd_pm_funcs in powerplay Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 15 ++++++ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 50 +++++++++---------- .../amd/powerplay/hwmgr/cz_clockpowergating.c | 8 +-- .../amd/powerplay/hwmgr/cz_clockpowergating.h | 4 +- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 4 +- .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 18 ------- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 4 +- .../powerplay/hwmgr/smu7_clockpowergating.c | 6 +-- .../powerplay/hwmgr/smu7_clockpowergating.h | 4 +- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 20 +++----- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 28 +++++------ .../gpu/drm/amd/powerplay/inc/amd_powerplay.h | 43 +--------------- .../drm/amd/powerplay/inc/hardwaremanager.h | 2 - drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 12 ++--- 14 files changed, 80 insertions(+), 138 deletions(-) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 140ff64383903..20457bb5a9060 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -257,6 +257,10 @@ struct amd_ip_funcs { void (*get_clockgating_state)(void *handle, u32 *flags); }; +enum amd_pp_task; + +struct pp_states_info; + struct amd_pm_funcs { int (*get_temperature)(void *handle); int (*pre_set_power_state)(void *handle); @@ -298,6 +302,17 @@ struct amd_pm_funcs { struct amd_pp_profile *request); int (*switch_power_profile)(void *handle, enum amd_pp_profile_type type); + int (*load_firmware)(void *handle); + int (*wait_for_fw_loading_complete)(void *handle); + enum amd_dpm_forced_level (*get_performance_level)(void *handle); + enum amd_pm_state_type (*get_current_power_state)(void *handle); + int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, + void *input, void *output); + int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm); + int 
(*get_pp_num_states)(void *handle, struct pp_states_info *data); + int (*get_pp_table)(void *handle, char **table); + int (*set_pp_table)(void *handle, const char *buf, size_t size); }; + #endif /* __AMD_SHARED_H__ */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 75c810f93e9e0..2634d792404af 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -376,11 +376,12 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( return level; } -static int pp_dpm_get_sclk(void *handle, bool low) +static uint32_t pp_dpm_get_sclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t clk = 0; ret = pp_check(pp_handle); @@ -394,16 +395,17 @@ static int pp_dpm_get_sclk(void *handle, bool low) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low); + clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); mutex_unlock(&pp_handle->pp_lock); - return ret; + return clk; } -static int pp_dpm_get_mclk(void *handle, bool low) +static uint32_t pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t clk = 0; ret = pp_check(pp_handle); @@ -417,12 +419,12 @@ static int pp_dpm_get_mclk(void *handle, bool low) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low); + clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); mutex_unlock(&pp_handle->pp_lock); - return ret; + return clk; } -static int pp_dpm_powergate_vce(void *handle, bool gate) +static void pp_dpm_powergate_vce(void *handle, bool gate) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -431,21 +433,20 @@ static int pp_dpm_powergate_vce(void *handle, bool gate) ret = pp_check(pp_handle); if (ret != 0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_vce == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); + hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); mutex_unlock(&pp_handle->pp_lock); - return ret; } -static int pp_dpm_powergate_uvd(void *handle, bool gate) +static void pp_dpm_powergate_uvd(void *handle, bool gate) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -454,18 +455,17 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate) ret = pp_check(pp_handle); if (ret != 0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); + hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); mutex_unlock(&pp_handle->pp_lock); - return ret; } static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, @@ -530,7 +530,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) return pm_type; } -static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) +static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -539,25 +539,25 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) ret = pp_check(pp_handle); if (ret != 
0) - return ret; + return; hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { pr_info("%s was not implemented.\n", __func__); - return 0; + return; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); + hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); mutex_unlock(&pp_handle->pp_lock); - return ret; } -static int pp_dpm_get_fan_control_mode(void *handle) +static uint32_t pp_dpm_get_fan_control_mode(void *handle) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + uint32_t mode = 0; ret = pp_check(pp_handle); @@ -571,9 +571,9 @@ static int pp_dpm_get_fan_control_mode(void *handle) return 0; } mutex_lock(&pp_handle->pp_lock); - ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); + mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); mutex_unlock(&pp_handle->pp_lock); - return ret; + return mode; } static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) @@ -1096,7 +1096,7 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -const struct amd_powerplay_funcs pp_dpm_funcs = { +const struct amd_pm_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 5baf1a48294a5..576b61eb6b8f7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -147,7 +147,7 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) } -int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -173,10 +173,9 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_update_uvd_dpm(hwmgr, false); } - return 0; } -int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -205,9 +204,6 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_CG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); - return 0; } - - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h index 1954ceaed439e..92f707bc46e76 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h @@ -29,8 +29,8 @@ extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master; -extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); #endif /* _CZ_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 
index d3b46462072d1..045fdb3da056d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1393,14 +1393,14 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) return 0; } -static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); return cz_hwmgr->sys_info.bootup_uma_clock; } -static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct cz_power_state *cz_ps; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index fa4fbc25ebe1b..7462f9562b890 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -155,24 +155,6 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) return 0; } -int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powergate_uvd != NULL) - return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); - return 0; -} - -int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate) -{ - PHM_FUNC_CHECK(hwmgr); - - if (hwmgr->hwmgr_func->powergate_vce != NULL) - return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); - return 0; -} - int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 594b978f46d97..603035a5a4dbd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -501,12 +501,12 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return 0; } -static int rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { return 0; } -static int rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 261b828ad5908..48f60dedac2bf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -140,7 +140,7 @@ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) return 0; } -int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -166,10 +166,9 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) smu7_update_uvd_dpm(hwmgr, false); } - return 0; } -int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -194,7 +193,6 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_PG_STATE_UNGATE); smu7_update_vce_dpm(hwmgr, false); } - return 0; } int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index c96ed9ed7eaff..7b54d48b2ce26 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -27,8 +27,8 @@ #include "smu7_hwmgr.h" #include "pp_asicblocks.h" -int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 8b8e202b45b42..f1f1e4b390ca0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2798,7 +2798,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } -static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct smu7_power_state *smu7_ps; @@ -2820,7 +2820,7 @@ static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) [smu7_ps->performance_level_count-1].memory_clock; } -static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct smu7_power_state *smu7_ps; @@ -4302,31 +4302,27 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } -static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - int result = 0; - switch (mode) { case AMD_FAN_CTRL_NONE: - result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); break; case AMD_FAN_CTRL_MANUAL: if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); break; case AMD_FAN_CTRL_AUTO: - result = smu7_fan_ctrl_set_static_mode(hwmgr, mode); - if (!result) - result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); + if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) + smu7_fan_ctrl_start_smc_fan_control(hwmgr); break; default: break; } - return result; } -static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) +static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) { return hwmgr->fan_ctrl_enabled ? 
AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ad34178b4ae3f..bd20d551e7198 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3872,7 +3872,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, return 0; } -static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct vega10_power_state *vega10_ps; @@ -3894,7 +3894,7 @@ static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) [vega10_ps->performance_level_count - 1].gfx_clock; } -static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) { struct pp_power_state *ps; struct vega10_power_state *vega10_ps; @@ -4216,27 +4216,23 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo return 0; } -static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - int result = 0; - switch (mode) { case AMD_FAN_CTRL_NONE: - result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); break; case AMD_FAN_CTRL_MANUAL: if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) - result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); break; case AMD_FAN_CTRL_AUTO: - result = vega10_fan_ctrl_set_static_mode(hwmgr, mode); - if (!result) - result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); + if (!vega10_fan_ctrl_set_static_mode(hwmgr, mode)) + vega10_fan_ctrl_start_smc_fan_control(hwmgr); break; default: break; } - return result; } static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, @@ -4282,7 +4278,7 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return ret; } -static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) +static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -4697,20 +4693,20 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) return 0; } -static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) +static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); data->vce_power_gated = bgate; - return vega10_enable_disable_vce_dpm(hwmgr, !bgate); + vega10_enable_disable_vce_dpm(hwmgr, !bgate); } -static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); data->uvd_power_gated = bgate; - return vega10_enable_disable_uvd_dpm(hwmgr, !bgate); + vega10_enable_disable_uvd_dpm(hwmgr, !bgate); } static inline bool vega10_are_power_levels_equal( diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 832340b8d1afb..435da26477271 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -31,7 +31,7 @@ #include "dm_pp_interface.h" extern const struct amd_ip_funcs pp_ip_funcs; -extern const struct amd_powerplay_funcs pp_dpm_funcs; +extern const 
struct amd_pm_funcs pp_dpm_funcs; #define PP_DPM_DISABLED 0xCCCC @@ -267,49 +267,10 @@ struct pp_display_clock_request { support << PP_STATE_SUPPORT_SHIFT |\ state << PP_STATE_SHIFT) -struct amd_powerplay_funcs { - int (*get_temperature)(void *handle); - int (*load_firmware)(void *handle); - int (*wait_for_fw_loading_complete)(void *handle); - int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level); - enum amd_dpm_forced_level (*get_performance_level)(void *handle); - enum amd_pm_state_type (*get_current_power_state)(void *handle); - int (*get_sclk)(void *handle, bool low); - int (*get_mclk)(void *handle, bool low); - int (*powergate_vce)(void *handle, bool gate); - int (*powergate_uvd)(void *handle, bool gate); - int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, - void *input, void *output); - int (*set_fan_control_mode)(void *handle, uint32_t mode); - int (*get_fan_control_mode)(void *handle); - int (*set_fan_speed_percent)(void *handle, uint32_t percent); - int (*get_fan_speed_percent)(void *handle, uint32_t *speed); - int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm); - int (*get_pp_num_states)(void *handle, struct pp_states_info *data); - int (*get_pp_table)(void *handle, char **table); - int (*set_pp_table)(void *handle, const char *buf, size_t size); - int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); - int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); - int (*get_sclk_od)(void *handle); - int (*set_sclk_od)(void *handle, uint32_t value); - int (*get_mclk_od)(void *handle); - int (*set_mclk_od)(void *handle, uint32_t value); - int (*read_sensor)(void *handle, int idx, void *value, int *size); - struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx); - int (*reset_power_profile_state)(void *handle, - struct amd_pp_profile *request); - int (*get_power_profile_state)(void *handle, - struct amd_pp_profile *query); - int (*set_power_profile_state)(void *handle, - struct amd_pp_profile *request); - int (*switch_power_profile)(void *handle, - enum amd_pp_profile_type type); -}; - struct amd_powerplay { void *pp_handle; const struct amd_ip_funcs *ip_funcs; - const struct amd_powerplay_funcs *pp_funcs; + const struct amd_pm_funcs *pp_funcs; }; int amd_powerplay_create(struct amd_pp_init *pp_init, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 03adece4efea6..629990f505dde 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -375,8 +375,6 @@ struct phm_odn_clock_levels { extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); -extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 2a10d81741265..f4b6f0ebda754 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -270,10 +270,10 @@ struct pp_hwmgr_func { unsigned long, struct pp_power_state *); int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); - int 
(*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); - int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); - int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); - int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); + void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); + void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); + uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); + uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, const void *state); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); @@ -287,8 +287,8 @@ struct pp_hwmgr_func { int (*get_temperature)(struct pp_hwmgr *hwmgr); int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); - int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); - int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); + void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); + uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent); int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed); int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent); From cd4d74648b8021f0d0c7cc31a92fb4ea436a0019 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 6 Sep 2017 18:43:52 +0800 Subject: [PATCH 152/232] drm/amdgpu: unify the interface of amd_pm_funcs put amd_pm_funcs table in struct powerplay for all asics. Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 120 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 236 ++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 3 + drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 10 +- drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 3 +- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 10 +- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 10 +- drivers/gpu/drm/amd/amdgpu/si_dpm.h | 1 + 9 files changed, 161 insertions(+), 237 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 77a32b79e8f2b..b0109ebe0a1ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3498,10 +3498,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, valuesize = sizeof(values); if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) - r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize); - else if (adev->pm.funcs && adev->pm.funcs->read_sensor) - r = adev->pm.funcs->read_sensor(adev, idx, &values[0], - &valuesize); + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); else return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 2f2bdb032d30b..f79f9ea58b172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -241,134 +241,119 @@ enum amdgpu_pcie_gen { AMDGPU_PCIE_GEN_INVALID = 0xffff }; -#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) -#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) -#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) -#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) -#define 
amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) -#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) -#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) +#define amdgpu_dpm_pre_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_post_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_display_configuration_changed(adev) \ + ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_print_power_state(adev, ps) \ + ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) + +#define amdgpu_dpm_vblank_too_short(adev) \ + ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_enable_bapm(adev, e) \ + ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) #define amdgpu_dpm_read_sensor(adev, idx, value, size) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \ - (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size))) + ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size))) #define amdgpu_dpm_get_temperature(adev) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_temperature((adev))) + ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle)) #define amdgpu_dpm_set_fan_control_mode(adev, m) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->set_fan_control_mode((adev), (m))) + ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) #define amdgpu_dpm_get_fan_control_mode(adev) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_fan_control_mode((adev))) + ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle)) #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) + ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s))) #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) + ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s))) #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \ - -EINVAL) + ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s)) #define amdgpu_dpm_get_sclk(adev, l) \ - ((adev)->pp_enabled ? 
\ - (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_sclk((adev), (l))) + ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l))) #define amdgpu_dpm_get_mclk(adev, l) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_mclk((adev), (l))) - + ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l))) #define amdgpu_dpm_force_performance_level(adev, l) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->force_performance_level((adev), (l))) + ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l))) #define amdgpu_dpm_powergate_uvd(adev, g) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_uvd((adev), (g))) + ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g))) #define amdgpu_dpm_powergate_vce(adev, g) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_vce((adev), (g))) + ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g))) #define amdgpu_dpm_get_current_power_state(adev) \ - (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) + ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) #define amdgpu_dpm_get_pp_num_states(adev, data) \ - (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) + ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) #define amdgpu_dpm_get_pp_table(adev, table) \ - (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) + ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)) #define amdgpu_dpm_set_pp_table(adev, buf, size) \ - (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) + ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)) #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ - (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) + ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)) #define amdgpu_dpm_force_clock_level(adev, type, level) \ - (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) + ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)) #define amdgpu_dpm_get_sclk_od(adev) \ - (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) + ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)) #define amdgpu_dpm_set_sclk_od(adev, value) \ - (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) + ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)) #define amdgpu_dpm_get_mclk_od(adev) \ - ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) + ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) #define amdgpu_dpm_set_mclk_od(adev, value) \ - ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) + ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) #define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \ - 
((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) + ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) -#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal)) +#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ + ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) #define amdgpu_dpm_get_vce_clock_state(adev, i) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \ - (adev)->pm.funcs->get_vce_clock_state((adev), (i))) + ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i))) -#define amdgpu_dpm_get_performance_level(adev) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \ - (adev)->pm.dpm.forced_level) +#define amdgpu_dpm_get_performance_level(adev) \ + ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)) #define amdgpu_dpm_reset_power_profile_state(adev, request) \ - ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ + ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ (adev)->powerplay.pp_handle, request)) #define amdgpu_dpm_get_power_profile_state(adev, query) \ - ((adev)->powerplay.pp_funcs->get_power_profile_state(\ + ((adev)->powerplay.pp_funcs->get_power_profile_state(\ (adev)->powerplay.pp_handle, query)) #define amdgpu_dpm_set_power_profile_state(adev, request) \ - ((adev)->powerplay.pp_funcs->set_power_profile_state(\ + ((adev)->powerplay.pp_funcs->set_power_profile_state(\ (adev)->powerplay.pp_handle, request)) #define amdgpu_dpm_switch_power_profile(adev, type) \ - ((adev)->powerplay.pp_funcs->switch_power_profile(\ + ((adev)->powerplay.pp_funcs->switch_power_profile(\ (adev)->powerplay.pp_handle, type)) struct amdgpu_dpm { @@ -442,7 +427,6 @@ struct amdgpu_pm { struct amdgpu_dpm dpm; const struct firmware *fw; /* SMC firmware */ uint32_t fw_version; - const struct amd_pm_funcs *funcs; uint32_t pcie_gen_mask; uint32_t pcie_mlw_mask; struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 10c5d78081edf..f6ce52956e6d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -74,7 +74,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) adev->pm.dpm.ac_power = true; else adev->pm.dpm.ac_power = false; - if (adev->pm.funcs->enable_bapm) + if (adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power); mutex_unlock(&adev->pm.mutex); } @@ -88,9 +88,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (adev->pp_enabled) { + if (adev->powerplay.pp_funcs->get_current_power_state) pm = amdgpu_dpm_get_current_power_state(adev); - } else + else pm = adev->pm.dpm.user_state; return snprintf(buf, PAGE_SIZE, "%s\n", @@ -140,13 +140,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - enum amd_dpm_forced_level level; + enum amd_dpm_forced_level level = 0xff; if ((adev->flags & AMD_IS_PX) && (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return snprintf(buf, 
PAGE_SIZE, "off\n"); - level = amdgpu_dpm_get_performance_level(adev); + if (adev->powerplay.pp_funcs->get_performance_level) + level = amdgpu_dpm_get_performance_level(adev); + else + level = adev->pm.dpm.forced_level; + return snprintf(buf, PAGE_SIZE, "%s\n", (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : @@ -167,7 +171,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level; + enum amd_dpm_forced_level current_level = 0xff; int ret = 0; /* Can't force performance level when the card is off */ @@ -175,7 +179,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return -EINVAL; - current_level = amdgpu_dpm_get_performance_level(adev); + if (adev->powerplay.pp_funcs->get_performance_level) + current_level = amdgpu_dpm_get_performance_level(adev); if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; @@ -203,9 +208,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, if (current_level == level) return count; - if (adev->pp_enabled) - amdgpu_dpm_force_performance_level(adev, level); - else { + if (adev->powerplay.pp_funcs->force_performance_level) { mutex_lock(&adev->pm.mutex); if (adev->pm.dpm.thermal_active) { count = -EINVAL; @@ -233,7 +236,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, struct pp_states_info data; int i, buf_len; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->get_pp_num_states) amdgpu_dpm_get_pp_num_states(adev, &data); buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); @@ -257,8 +260,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, enum amd_pm_state_type pm = 0; int i = 0; - if (adev->pp_enabled) { - + if (adev->powerplay.pp_funcs->get_current_power_state + && adev->powerplay.pp_funcs->get_pp_num_states) { pm = amdgpu_dpm_get_current_power_state(adev); amdgpu_dpm_get_pp_num_states(adev, &data); @@ -280,25 +283,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct pp_states_info data; - enum amd_pm_state_type pm = 0; - int i; - - if (adev->pp_force_state_enabled && adev->pp_enabled) { - pm = amdgpu_dpm_get_current_power_state(adev); - amdgpu_dpm_get_pp_num_states(adev, &data); - - for (i = 0; i < data.nums; i++) { - if (pm == data.states[i]) - break; - } - if (i == data.nums) - i = -EINVAL; - - return snprintf(buf, PAGE_SIZE, "%d\n", i); - - } else + if (adev->pp_force_state_enabled) + return amdgpu_get_pp_cur_state(dev, attr, buf); + else return snprintf(buf, PAGE_SIZE, "\n"); } @@ -347,7 +335,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, char *table = NULL; int size; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->get_pp_table) size = amdgpu_dpm_get_pp_table(adev, &table); else return 0; @@ -368,7 +356,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->set_pp_table) amdgpu_dpm_set_pp_table(adev, buf, count); return count; @@ -380,14 +368,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); 
struct amdgpu_device *adev = ddev->dev_private; - ssize_t size = 0; - if (adev->pp_enabled) - size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); - else if (adev->pm.funcs->print_clock_levels) - size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); - - return size; + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); } static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, @@ -416,10 +401,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, mask |= 1 << level; } - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); - else if (adev->pm.funcs->force_clock_level) - adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); + fail: return count; } @@ -430,14 +414,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - ssize_t size = 0; - - if (adev->pp_enabled) - size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); - else if (adev->pm.funcs->print_clock_levels) - size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); - return size; + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); } static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, @@ -465,11 +446,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, } mask |= 1 << level; } - - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); - else if (adev->pm.funcs->force_clock_level) - adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); + fail: return count; } @@ -480,14 +459,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - ssize_t size = 0; - - if (adev->pp_enabled) - size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); - else if (adev->pm.funcs->print_clock_levels) - size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); - return size; + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); } static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, @@ -515,11 +491,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, } mask |= 1 << level; } - - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); - else if (adev->pm.funcs->force_clock_level) - adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); + fail: return count; } @@ -532,10 +506,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->get_sclk_od) value = amdgpu_dpm_get_sclk_od(adev); - else if (adev->pm.funcs->get_sclk_od) - value = adev->pm.funcs->get_sclk_od(adev); return snprintf(buf, PAGE_SIZE, "%d\n", value); } @@ -556,12 +528,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, count = -EINVAL; goto fail; } + if (adev->powerplay.pp_funcs->set_sclk_od) + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); if (adev->pp_enabled) { - amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); amdgpu_dpm_dispatch_task(adev, 
AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); - } else if (adev->pm.funcs->set_sclk_od) { - adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); + } else { adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; amdgpu_pm_compute_clocks(adev); } @@ -578,10 +550,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->get_mclk_od) value = amdgpu_dpm_get_mclk_od(adev); - else if (adev->pm.funcs->get_mclk_od) - value = adev->pm.funcs->get_mclk_od(adev); return snprintf(buf, PAGE_SIZE, "%d\n", value); } @@ -602,12 +572,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, count = -EINVAL; goto fail; } + if (adev->powerplay.pp_funcs->set_mclk_od) + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); if (adev->pp_enabled) { - amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); - } else if (adev->pm.funcs->set_mclk_od) { - adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); + } else { adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; amdgpu_pm_compute_clocks(adev); } @@ -621,14 +591,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - int ret = 0; + int ret = 0xff; - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->get_power_profile_state) ret = amdgpu_dpm_get_power_profile_state( adev, query); - else if (adev->pm.funcs->get_power_profile_state) - ret = adev->pm.funcs->get_power_profile_state( - adev, query); if (ret) return ret; @@ -675,15 +642,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev, char *sub_str, buf_cpy[128], *tmp_str; const char delimiter[3] = {' ', '\n', '\0'}; long int value; - int ret = 0; + int ret = 0xff; if (strncmp("reset", buf, strlen("reset")) == 0) { - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->reset_power_profile_state) ret = amdgpu_dpm_reset_power_profile_state( adev, request); - else if (adev->pm.funcs->reset_power_profile_state) - ret = adev->pm.funcs->reset_power_profile_state( - adev, request); if (ret) { count = -EINVAL; goto fail; @@ -692,12 +656,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev, } if (strncmp("set", buf, strlen("set")) == 0) { - if (adev->pp_enabled) + if (adev->powerplay.pp_funcs->set_power_profile_state) ret = amdgpu_dpm_set_power_profile_state( adev, request); - else if (adev->pm.funcs->set_power_profile_state) - ret = adev->pm.funcs->set_power_profile_state( - adev, request); + if (ret) { count = -EINVAL; goto fail; @@ -745,13 +707,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev, loop++; } - - if (adev->pp_enabled) - ret = amdgpu_dpm_set_power_profile_state( - adev, request); - else if (adev->pm.funcs->set_power_profile_state) - ret = adev->pm.funcs->set_power_profile_state( - adev, request); + if (adev->powerplay.pp_funcs->set_power_profile_state) + ret = amdgpu_dpm_set_power_profile_state(adev, request); if (ret) count = -EINVAL; @@ -831,7 +788,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return -EINVAL; - if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) + if (!adev->powerplay.pp_funcs->get_temperature) temp = 0; else temp = amdgpu_dpm_get_temperature(adev); @@ -862,7 +819,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, struct amdgpu_device *adev = 
dev_get_drvdata(dev); u32 pwm_mode = 0; - if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode) + if (!adev->powerplay.pp_funcs->get_fan_control_mode) return -EINVAL; pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); @@ -879,7 +836,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, int err; int value; - if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode) + if (!adev->powerplay.pp_funcs->set_fan_control_mode) return -EINVAL; err = kstrtoint(buf, 10, &value); @@ -919,9 +876,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, value = (value * 100) / 255; - err = amdgpu_dpm_set_fan_speed_percent(adev, value); - if (err) - return err; + if (adev->powerplay.pp_funcs->set_fan_speed_percent) { + err = amdgpu_dpm_set_fan_speed_percent(adev, value); + if (err) + return err; + } return count; } @@ -932,11 +891,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); int err; - u32 speed; + u32 speed = 0; - err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); - if (err) - return err; + if (adev->powerplay.pp_funcs->get_fan_speed_percent) { + err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); + if (err) + return err; + } speed = (speed * 255) / 100; @@ -949,11 +910,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); int err; - u32 speed; + u32 speed = 0; - err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - if (err) - return err; + if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { + err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); + if (err) + return err; + } return sprintf(buf, "%i\n", speed); } @@ -1008,21 +971,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* mask fan attributes if we have no bindings for this asic to expose */ - if ((!adev->pm.funcs->get_fan_speed_percent && + if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ - (!adev->pm.funcs->get_fan_control_mode && + (!adev->powerplay.pp_funcs->get_fan_control_mode && attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ effective_mode &= ~S_IRUGO; - if ((!adev->pm.funcs->set_fan_speed_percent && + if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ - (!adev->pm.funcs->set_fan_control_mode && + (!adev->powerplay.pp_funcs->set_fan_control_mode && attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ effective_mode &= ~S_IWUSR; /* hide max/min values if we can't both query and manage the fan */ - if ((!adev->pm.funcs->set_fan_speed_percent && - !adev->pm.funcs->get_fan_speed_percent) && + if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && + !adev->powerplay.pp_funcs->get_fan_speed_percent) && (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; @@ -1055,7 +1018,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) if (!adev->pm.dpm_enabled) return; - if (adev->pm.funcs->get_temperature) { + if (adev->powerplay.pp_funcs->get_temperature) { int temp = amdgpu_dpm_get_temperature(adev); if (temp < adev->pm.dpm.thermal.min_temp) @@ -1087,7 +1050,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, true : false; /* check if the vblank period is too short to adjust the mclk */ - if (single_display && adev->pm.funcs->vblank_too_short) 
{ + if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { if (amdgpu_dpm_vblank_too_short(adev)) single_display = false; } @@ -1216,7 +1179,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) struct amdgpu_ps *ps; enum amd_pm_state_type dpm_state; int ret; - bool equal; + bool equal = false; /* if dpm init failed */ if (!adev->pm.dpm_enabled) @@ -1236,7 +1199,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) else return; - if (amdgpu_dpm == 1) { + if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { printk("switching from power state:\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); printk("switching to power state:\n"); @@ -1245,15 +1208,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) /* update whether vce is active */ ps->vce_active = adev->pm.dpm.vce_active; - - amdgpu_dpm_display_configuration_changed(adev); + if (adev->powerplay.pp_funcs->display_configuration_changed) + amdgpu_dpm_display_configuration_changed(adev); ret = amdgpu_dpm_pre_set_power_state(adev); if (ret) return; - if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))) - equal = false; + if (adev->powerplay.pp_funcs->check_state_equal) { + if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) + equal = false; + } if (equal) return; @@ -1264,7 +1229,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - if (adev->pm.funcs->force_performance_level) { + if (adev->powerplay.pp_funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; /* force low perf level for thermal */ @@ -1280,7 +1245,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { + if (adev->powerplay.pp_funcs->powergate_uvd) { /* enable/disable UVD */ mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_uvd(adev, !enable); @@ -1302,7 +1267,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { + if (adev->powerplay.pp_funcs->powergate_vce) { /* enable/disable VCE */ mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_vce(adev, !enable); @@ -1337,8 +1302,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) { int i; - if (adev->pp_enabled) - /* TO DO */ + if (adev->powerplay.pp_funcs->print_power_state == NULL) return; for (i = 0; i < adev->pm.dpm.num_ps; i++) @@ -1353,10 +1317,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.sysfs_initialized) return 0; - if (!adev->pp_enabled) { - if (adev->pm.funcs->get_temperature == NULL) - return 0; - } + if (adev->powerplay.pp_funcs->get_temperature == NULL) + return 0; adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, DRIVER_NAME, adev, @@ -1634,8 +1596,8 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) return amdgpu_debugfs_pm_info_pp(m, adev); } else { mutex_lock(&adev->pm.mutex); - if (adev->pm.funcs->debugfs_print_current_performance_level) - 
adev->pm.funcs->debugfs_print_current_performance_level(adev, m); + if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) + adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); else seq_printf(m, "Debugfs support not implemented for this asic\n"); mutex_unlock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index b0c4db8098edc..5cd5b8ee9744b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -87,17 +87,20 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_OLAND: case CHIP_HAINAN: amd_pp->ip_funcs = &si_dpm_ip_funcs; + amd_pp->pp_funcs = &si_dpm_funcs; break; #endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: amd_pp->ip_funcs = &ci_dpm_ip_funcs; + amd_pp->pp_funcs = &ci_dpm_funcs; break; case CHIP_KABINI: case CHIP_MULLINS: case CHIP_KAVERI: amd_pp->ip_funcs = &kv_dpm_ip_funcs; + amd_pp->pp_funcs = &kv_dpm_funcs; break; #endif default: diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index bdf792822ff5b..68ce1bdaf2fcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n); static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, u32 target_tdp); static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate); -static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev); static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev); static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, @@ -6282,7 +6281,6 @@ static int ci_dpm_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - ci_dpm_set_dpm_funcs(adev); ci_dpm_set_irq_funcs(adev); return 0; @@ -7035,7 +7033,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = { .set_powergating_state = ci_dpm_set_powergating_state, }; -static const struct amd_pm_funcs ci_dpm_funcs = { +const struct amd_pm_funcs ci_dpm_funcs = { .get_temperature = &ci_dpm_get_temp, .pre_set_power_state = &ci_dpm_pre_set_power_state, .set_power_state = &ci_dpm_set_power_state, @@ -7067,12 +7065,6 @@ static const struct amd_pm_funcs ci_dpm_funcs = { .read_sensor = ci_dpm_read_sensor, }; -static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) -{ - if (adev->pm.funcs == NULL) - adev->pm.funcs = &ci_dpm_funcs; -} - static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = { .set = ci_dpm_set_interrupt_state, .process = ci_dpm_process_interrupt, diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h index b1c8e7b446ea7..c7b4349f6319f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h @@ -26,5 +26,6 @@ extern const struct amd_ip_funcs ci_dpm_ip_funcs; extern const struct amd_ip_funcs kv_dpm_ip_funcs; - +extern const struct amd_pm_funcs ci_dpm_funcs; +extern const struct amd_pm_funcs kv_dpm_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f68d7abe1ed9f..b57399a462c2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -42,7 +42,6 @@ #define KV_MINIMUM_ENGINE_CLOCK 800 #define SMC_RAM_END 0x40000 -static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev); static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); static int kv_enable_nb_dpm(struct 
amdgpu_device *adev, bool enable); @@ -2961,7 +2960,6 @@ static int kv_dpm_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - kv_dpm_set_dpm_funcs(adev); kv_dpm_set_irq_funcs(adev); return 0; @@ -3327,7 +3325,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = { .set_powergating_state = kv_dpm_set_powergating_state, }; -static const struct amd_pm_funcs kv_dpm_funcs = { +const struct amd_pm_funcs kv_dpm_funcs = { .get_temperature = &kv_dpm_get_temp, .pre_set_power_state = &kv_dpm_pre_set_power_state, .set_power_state = &kv_dpm_set_power_state, @@ -3345,12 +3343,6 @@ static const struct amd_pm_funcs kv_dpm_funcs = { .read_sensor = &kv_dpm_read_sensor, }; -static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) -{ - if (adev->pm.funcs == NULL) - adev->pm.funcs = &kv_dpm_funcs; -} - static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { .set = kv_dpm_set_interrupt_state, .process = kv_dpm_process_interrupt, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 05b3dbf585d59..9b8db60462718 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -1847,7 +1847,6 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev); static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); -static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev); static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); static struct si_power_info *si_get_pi(struct amdgpu_device *adev) @@ -7944,7 +7943,6 @@ static int si_dpm_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - si_dpm_set_dpm_funcs(adev); si_dpm_set_irq_funcs(adev); return 0; } @@ -8062,7 +8060,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = { .set_powergating_state = si_dpm_set_powergating_state, }; -static const struct amd_pm_funcs si_dpm_funcs = { +const struct amd_pm_funcs si_dpm_funcs = { .get_temperature = &si_dpm_get_temp, .pre_set_power_state = &si_dpm_pre_set_power_state, .set_power_state = &si_dpm_set_power_state, @@ -8083,12 +8081,6 @@ static const struct amd_pm_funcs si_dpm_funcs = { .read_sensor = &si_dpm_read_sensor, }; -static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) -{ - if (adev->pm.funcs == NULL) - adev->pm.funcs = &si_dpm_funcs; -} - static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { .set = si_dpm_set_interrupt_state, .process = si_dpm_process_interrupt, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h index 51ce21c5f4fbd..9fe343de34779 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h @@ -246,6 +246,7 @@ enum si_display_gap }; extern const struct amd_ip_funcs si_dpm_ip_funcs; +extern const struct amd_pm_funcs si_dpm_funcs; struct ni_leakage_coeffients { From 6df9855fe200d4e7e5cdd85575fb28cce808b2cc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 8 Sep 2017 14:05:51 +0800 Subject: [PATCH 153/232] drm/amdgpu: add support for request SI/CI firmware in CGS Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 79 +++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index df3bf22039d58..e521920515076 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -633,6 +633,85 @@ 
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { + case CHIP_TAHITI: + strcpy(fw_name, "radeon/tahiti_smc.bin"); + break; + case CHIP_PITCAIRN: + if ((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811))) { + info->is_kicker = true; + strcpy(fw_name, "radeon/pitcairn_k_smc.bin"); + } else { + strcpy(fw_name, "radeon/pitcairn_smc.bin"); + } + break; + case CHIP_VERDE: + if (((adev->pdev->device == 0x6820) && + ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83))) || + ((adev->pdev->device == 0x6821) && + ((adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87))) || + ((adev->pdev->revision == 0x87) && + ((adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682b)))) { + info->is_kicker = true; + strcpy(fw_name, "radeon/verde_k_smc.bin"); + } else { + strcpy(fw_name, "radeon/verde_smc.bin"); + } + break; + case CHIP_OLAND: + if (((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6600) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605) || + (adev->pdev->device == 0x6610))) || + ((adev->pdev->revision == 0x83) && + (adev->pdev->device == 0x6610))) { + info->is_kicker = true; + strcpy(fw_name, "radeon/oland_k_smc.bin"); + } else { + strcpy(fw_name, "radeon/oland_smc.bin"); + } + break; + case CHIP_HAINAN: + if (((adev->pdev->revision == 0x81) && + (adev->pdev->device == 0x6660)) || + ((adev->pdev->revision == 0x83) && + ((adev->pdev->device == 0x6660) || + (adev->pdev->device == 0x6663) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)))) { + info->is_kicker = true; + strcpy(fw_name, "radeon/hainan_k_smc.bin"); + } else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) { + info->is_kicker = true; + strcpy(fw_name, "radeon/banks_k_2_smc.bin"); + } else { + strcpy(fw_name, "radeon/hainan_smc.bin"); + } + break; + case CHIP_BONAIRE: + if ((adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x665f)) { + info->is_kicker = true; + strcpy(fw_name, "radeon/bonaire_k_smc.bin"); + } else { + strcpy(fw_name, "radeon/bonaire_smc.bin"); + } + break; + case CHIP_HAWAII: + if (adev->pdev->revision == 0x80) { + info->is_kicker = true; + strcpy(fw_name, "radeon/hawaii_k_smc.bin"); + } else { + strcpy(fw_name, "radeon/hawaii_smc.bin"); + } + break; case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || From e71b7ae6731c1b426818ce4c9baa493fb4d6c427 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 8 Sep 2017 14:31:26 +0800 Subject: [PATCH 154/232] drm/amd/powerplay: fix spelling typo in function name Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 2 +- .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 2 +- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 12 +++++------ .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 11 ---------- .../drm/amd/powerplay/smumgr/iceland_smc.c | 21 +++++-------------- .../drm/amd/powerplay/smumgr/iceland_smumgr.h | 2 +- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 10 --------- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 20 ++++-------------- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.h | 2 +- 9 files changed, 19 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 953e0c9ad7cdd..49733c7817179 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -470,7 +470,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE */ -bool atomctrl_is_voltage_controled_by_gpio_v3( +bool atomctrl_is_voltage_controlled_by_gpio_v3( struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index e9fe2e84006b1..8d4188ad941af 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -291,7 +291,7 @@ extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr); extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode); extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers); -extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode); +extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode); extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f1f1e4b390ca0..03075c6ac5127 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1392,13 +1392,13 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) } data->fast_watermark_threshold = 100; - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDGFX)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } @@ -1406,10 +1406,10 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } @@ -1421,10 +1421,10 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 8712f093d6d90..9f612dd395ac0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -398,11 +398,6 @@ static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); @@ -472,12 +467,6 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW19 */ - if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW20 */ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 51adf04ab4b38..1ed3214a965f2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -193,11 +193,6 @@ static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); @@ -317,12 +312,6 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW17 */ - if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW18 */ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, @@ -339,7 +328,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, uint32_t *vol) { @@ -749,7 +738,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), @@ -1104,7 +1093,7 @@ static int iceland_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, 
hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); @@ -1113,7 +1102,7 @@ static int iceland_populate_single_memory_level( if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) { memory_level->MinVddci = memory_level->MinVddc; } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk, memory_clock, &memory_level->MinVddci); @@ -1776,7 +1765,7 @@ static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); - dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); def1 = defaults->bapmti_r; def2 = defaults->bapmti_rc; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h index 8eae01b37c401..802472530d346 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -39,7 +39,7 @@ struct iceland_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 99a00bd392565..2d444bb4802a2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -288,11 +288,6 @@ static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); @@ -358,11 +353,6 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo " diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 65d3a48939582..a628eec5e6dac 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -97,7 +97,7 @@ static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = { */ -static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) { @@ -539,7 +539,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = 
tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_sclk, engine_clock, &graphic_level->MinVoltage, &mvdd); PP_ASSERT_WITH_CODE((!result), @@ -895,7 +895,7 @@ static int tonga_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (NULL != pptable_info->vdd_dep_on_mclk) { - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &mvdd); @@ -1838,7 +1838,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; dpm_table->BAPM_TEMP_GRADIENT = - PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); pdef1 = defaults->bapmti_r; pdef2 = defaults->bapmti_rc; @@ -1958,11 +1958,6 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = @@ -2035,13 +2030,6 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed !", return -EINVAL); - /* DW19 */ - if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML " - "Min and Max Vid Failed !", - return -EINVAL); - /* DW20 */ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE( diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h index 962860f13f24d..9d6a78a65976c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -40,7 +40,7 @@ struct tonga_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; From 510c2558b9055f3ae8a405cb18e1cbb7262449fc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 12 Sep 2017 13:18:13 +0800 Subject: [PATCH 155/232] drm/amd/powerplay: fix pcie max lane define error Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 629990f505dde..57a0467b72676 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -297,7 +297,7 @@ typedef enum PP_PCIEGen PP_PCIEGen; #define PP_Min_PCIEGen PP_PCIEGen1 #define PP_Max_PCIEGen PP_PCIEGen3 #define PP_Min_PCIELane 1 -#define PP_Max_PCIELane 32 +#define PP_Max_PCIELane 16 enum phm_clock_Type { PHM_DispClock = 1, From 9f4b35411cfed96d4f9f092b2fed14905af84d89 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 8 Sep 2017 19:34:33 +0800 Subject: [PATCH 156/232] drm/amd/powerplay: add CI asics support to smumgr (v3) This ports support for CI asics (Bonaire, Hawaii) to the powerplay smumgr v2: warning fix (Alex) v3: squash in fix for thermal (Tom) 
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../include/asic_reg/smu/smu_7_0_1_sh_mask.h | 2 + drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 1 + drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2755 +++++++++++++++++ drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h | 52 + .../gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 86 + .../gpu/drm/amd/powerplay/smumgr/ci_smumgr.h | 78 + drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3 + 8 files changed, 2978 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 34c6ff52710e7..6af9f0217b349 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -5454,5 +5454,7 @@ #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 +#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en_MASK 0x1 +#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en__SHIFT 0 #endif /* SMU_7_0_1_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index eb6609116a7af..f807dd639aede 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -33,6 +33,7 @@ struct pp_hwmgr; #define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) +extern const struct pp_smumgr_func ci_smu_funcs; extern const struct pp_smumgr_func cz_smu_funcs; extern const struct pp_smumgr_func iceland_smu_funcs; extern const struct pp_smumgr_func tonga_smu_funcs; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 1703bbefbfd52..a423c0a851298 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -4,7 +4,7 @@ SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ - smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o + smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o ci_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c new file mode 100644 index 0000000000000..b7a2391907852 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -0,0 +1,2755 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include +#include "linux/delay.h" +#include + +#include "smumgr.h" +#include "pp_debug.h" +#include "ci_smc.h" +#include "ci_smumgr.h" +#include "ppsmc.h" +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "cgs_common.h" +#include "atombios.h" +#include "pppcielanes.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "processpptables.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x40000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define CISLAND_MINIMUM_ENGINE_CLOCK 800 +#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 + +static const struct ci_pt_defaults defaults_hawaii_xt = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_hawaii_pro = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_bonaire_xt = { + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + + +static const struct ci_pt_defaults defaults_saturn_xt = { + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, + { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, + { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } +}; + + +static int ci_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smc_addr, uint32_t limit) +{ + if ((0 != (3 & smc_addr)) + || ((smc_addr 
+ 3) >= limit)) { + pr_err("smc_addr invalid \n"); + return -EINVAL; + } + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + return 0; +} + +static int ci_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + int result; + uint32_t data = 0; + uint32_t original_data; + uint32_t addr = 0; + uint32_t extra_shift; + + if ((3 & smc_start_address) + || ((smc_start_address + byte_count) >= limit)) { + pr_err("smc_start_address invalid \n"); + return -EINVAL; + } + + addr = smc_start_address; + + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first. */ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = ci_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + + data = 0; + + result = ci_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + + original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = (0x100 * data) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + result = ci_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + } + + return 0; +} + + +static int ci_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; + + ci_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +bool ci_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(smumgr->device, + CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +static int ci_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit) +{ + int result; + + result = ci_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + return 0; +} + +int ci_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + int ret; + + if (!ci_is_smc_ram_running(smumgr)) + return -EINVAL; + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + pr_info("\n failed to send message %x ret is %d\n", msg, ret); + + return 0; +} + +int ci_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + return ci_send_msg_to_smc(smumgr, msg); +} + +static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = 
(uint32_t)sys_info.value; + + switch (dev_id) { + case 0x67BA: + case 0x66B1: + smu_data->power_tune_defaults = &defaults_hawaii_pro; + break; + case 0x67B8: + case 0x66B0: + smu_data->power_tune_defaults = &defaults_hawaii_xt; + break; + case 0x6640: + case 0x6641: + case 0x6646: + case 0x6647: + smu_data->power_tune_defaults = &defaults_saturn_xt; + break; + case 0x6649: + case 0x6650: + case 0x6651: + case 0x6658: + case 0x665C: + case 0x665D: + case 0x67A0: + case 0x67A1: + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B9: + case 0x67BE: + default: + smu_data->power_tune_defaults = &defaults_bonaire_xt; + break; + } +} + +static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + *vol = allowed_clock_voltage_table->entries[i - 1].v; + return 0; +} + +static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. 
*/ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ss_info; + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ss_info)) { + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ss_info.speed_spectrum_rate); + uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, + const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + *p_shed = i; + break; + } + } +} + +static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK); + + if (clock < min) { + pr_info("Engine clock can't satisfy stutter requirement!\n"); + return 0; + } + for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + temp = clock >> i; + + if (temp >= min || i == 0) + break; + } + return i; +} + +static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU7_Discrete_GraphicsLevel *level) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + + result = ci_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, clock, + (uint32_t *)(&level->MinVddc)); + if (result) { + pr_err("vdd_dep_on_sclk table is NULL\n"); + return result; + } + + level->SclkFrequency = clock; + level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) + ci_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + clock, + &level->MinVddcPhases); + + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + 
/* this level can be used for throttling.*/ + level->EnabledForThrottle = 1; + level->UpH = 0; + level->DownH = 0; + level->VoltageDownH = 0; + level->PowerThrottle = 0; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = + ci_get_sleep_divider_id_from_clock(clock, + CISLAND_MINIMUM_ENGINE_CLOCK); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + } + + return result; +} + +int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = ci_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &levels[i]); + if (result) + return result; + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + if (i == (dpm_table->sclk_table.count - 1)) + smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + } + + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + result = ci_copy_bytes_to_smc(hwmgr->smumgr, array, + (u8 *)levels, array_size, + SMC_RAM_END); + + return result; + +} + +static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + 
defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (ci_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + uint16_t tmp = 0; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity; + else + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp); + + return 0; +} + +static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); + + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3); + } else { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage); + } + } + + return 0; +} + +static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint8_t *vid = smu_data->power_tune_table.VddCVid; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + + return 0; +} + +static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = 
(struct ci_smumgr *)(hwmgr->smumgr->backend); + u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + int i, min, max; + + min = max = hi_vid[0]; + for (i = 0; i < 8; i++) { + if (0 != hi_vid[i]) { + if (min > hi_vid[i]) + min = hi_vid[i]; + if (max < hi_vid[i]) + max = hi_vid[i]; + } + + if (0 != lo_vid[i]) { + if (min > lo_vid[i]) + min = lo_vid[i]; + if (max < lo_vid[i]) + max = lo_vid[i]; + } + } + + if ((min == 0) || (max == 0)) + return -EINVAL; + smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max; + smu_data->power_tune_table.GnbLPMLMinVid = (u8)min; + + return 0; +} + +static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + int ret = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) { + pr_err("Attempt to get pm_fuse_table_offset Failed!\n"); + return -EINVAL; + } + + /* DW0 - DW3 */ + ret = ci_populate_bapm_vddc_vid_sidd(hwmgr); + /* DW4 - DW5 */ + ret |= ci_populate_vddc_vid(hwmgr); + /* DW6 */ + ret |= ci_populate_svi_load_line(hwmgr); + /* DW7 */ + ret |= ci_populate_tdc_limit(hwmgr); + /* DW8 */ + ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset); + + ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset); + + ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr); + + ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr); + if (ret) + return ret; + + ret = ci_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END); + } + return ret; +} + +static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + const uint16_t *def1, *def2; + int i, j, k; + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = 0; + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = 
defaults->dte_ambient_temp_base; + + if (ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU7_DTE_SOURCES; j++) { + for (k = 0; k < SMU7_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warn("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU7_Discrete_VoltageLevel *smc_voltage_tab) +{ + int result; + + result = ci_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, 
+ &smc_voltage_tab->StdVoltageLoSidd); + if (result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd); + + return 0; +} + +static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddc_voltage_table.entries[count]), + &(table->VddcLevel[count])); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else + table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->VddciLevelCount = data->vddci_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddci_voltage_table.entries[count]), + &(table->VddciLevel[count])); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->MvddLevelCount = data->mvdd_voltage_table.count; + + for (count = 0; count < table->MvddLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->mvdd_voltage_table.entries[count]), + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + + +static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result; + + result = ci_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -EINVAL); + + return 0; +} + +static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct 
SMU7_Discrete_Ulv *state) +{ + uint32_t voltage_response_time, ulv_voltage; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if (ulv_voltage == 0) { + data->ulv_supported = false; + return 0; + } + + if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffset = 0; + else + /* used in SMIO Mode. not implemented for now. this is backup only for CI. */ + state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffsetVid = 0; + else /* used in SVI2 Mode */ + state->VddcOffsetVid = (uint8_t)( + (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) + * VOLTAGE_VID_OFFSET_SCALE2 + / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr, + SMU7_Discrete_Ulv *ulv_level) +{ + return ci_populate_ulv_level(hwmgr, ulv_level); +} + +static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint32_t i; + +/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int ci_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + 
pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + + return mc_para_index; +} + +static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if 
(memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + + return mc_para_index; +} + +static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + *p_shed = i; + break; + } + } + + return 0; +} + +static int ci_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *memory_level + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + memory_clock, + &memory_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + memory_level->UpH = 0; + memory_level->DownH = 100; + memory_level->VoltageDownH = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. 
Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + /* stutter mode not support on ci */ + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (ci_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + else + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } else + dll_state_on = data->dll_default_on; + } else { + memory_level->StrobeRatio = + ci_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } + + result = ci_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY; + SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return 
-EINVAL); + result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (0 != result) + return result; + } + + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + if ((dpm_table->mclk_table.count >= 2) + && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) { + smu_data->smc_state_table.MemoryLevel[1].MinVddci = + smu_data->smc_state_table.MemoryLevel[0].MinVddci; + smu_data->smc_state_table.MemoryLevel[1].MinMvdd = + smu_data->smc_state_table.MemoryLevel[0].MinMvdd; + } + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + result = ci_copy_bytes_to_smc(hwmgr->smumgr, + level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, + SMU7_Discrete_VoltageLevel *voltage) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -EINVAL); + + } else { + return -EINVAL; + } + + return 0; +} + +static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + + SMU7_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 
0 : 1; + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; + else { + if (data->acpi_vddci != 0) + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); + } + + if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + 
PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpH = 0; + table->MemoryACPILevel.DownH = 100; + table->MemoryACPILevel.VoltageDownH = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + table->UvdLevelCount = (uint8_t)(uvd_table->count); + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = + uvd_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = + uvd_table->entries[count].dclk; + table->UvdLevel[count].MinVddc = + uvd_table->entries[count].v * VOLTAGE_SCALE; + table->UvdLevel[count].MinVddcPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc); + } + + return result; +} + +static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + table->VceLevelCount = (uint8_t)(vce_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = vce_table->entries[count].evclk; + table->VceLevel[count].MinVoltage = + vce_table->entries[count].v * VOLTAGE_SCALE; + table->VceLevel[count].MinPhases = 1; + + result = 
atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+					SMU7_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_acp_clock_voltage_dependency_table *acp_table =
+				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+
+	table->AcpLevelCount = (uint8_t)(acp_table->count);
+	table->AcpBootLevel = 0;
+
+	for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
+		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
+		table->AcpLevel[count].MinPhases = 1;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+			table->AcpLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find divide id for engine clock", return result);
+
+		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+					SMU7_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_samu_clock_voltage_dependency_table *samu_table =
+				hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+		table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+		table->SamuLevel[count].MinPhases = 1;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+			table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int ci_populate_memory_timing_parameters(
+		struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock,
+		uint32_t memory_clock,
+		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
+		)
+{
+	uint32_t dramTiming;
+	uint32_t dramTiming2;
+	uint32_t burstTime;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+				engine_clock, memory_clock);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+	arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+	return 0;
+}
+
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct
smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + int result = 0; + SMU7_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = ci_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) + break; + } + } + + if (0 == result) { + result = ci_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU7_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table. Using Graphics DPM level 0!"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table. 
Using Memory DPM level 0!");
+		result = 0;
+	}
+
+	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+	table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+	return result;
+}
+
+static int ci_populate_mc_reg_address(struct pp_smumgr *smumgr,
+				 SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+	const struct ci_smumgr *smu_data = (struct ci_smumgr *)smumgr->backend;
+
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+		if (smu_data->mc_reg_table.validflag & 1<<j) {
+			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+				"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+			mc_reg_table->address[i].s0 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (uint8_t)i;
+
+	return 0;
+}
+
+static void ci_convert_mc_registers(
+	const struct ci_mc_reg_entry *entry,
+	SMU7_Discrete_MCRegisterSet *data,
+	uint32_t num_entries, uint32_t valid_flag)
+{
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & 1<<j) {
+			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static int ci_convert_mc_reg_table_entry_to_smc(
+		struct pp_smumgr *smumgr,
+		const uint32_t memory_clock,
+		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
+		)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend);
+	uint32_t i = 0;
+
+	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+		if (memory_clock <=
+			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+			break;
+		}
+	}
+
+	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, smu_data->mc_reg_table.last,
+				smu_data->mc_reg_table.validflag);
+
+	return 0;
+}
+
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+		SMU7_Discrete_MCRegisters *mc_regs)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	int res;
+	uint32_t i;
+
+	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+		res = ci_convert_mc_reg_table_entry_to_smc(
+				hwmgr->smumgr,
+				data->dpm_table.mclk_table.dpm_levels[i].value,
+				&mc_regs->data[i]
+				);
+
+		if (0 != res)
+			result = res;
+	}
+
+	return result;
+}
+
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t address;
+	int32_t result;
+
+	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+
+	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+	if (result != 0)
+		return result;
+
+	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
+
+	return ci_copy_bytes_to_smc(hwmgr->smumgr, address,
+				(uint8_t *)&smu_data->mc_regs.data[0],
+				sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+				SMC_RAM_END);
+}
+
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend);
+
+	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+	result = ci_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+
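+	/* Editor's note (added comment): the register address list filled in
+	 * above and the per-MCLK register value sets converted below are
+	 * uploaded together as a single SMU7_Discrete_MCRegisters block at
+	 * mc_reg_table_start.
+	 */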
PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return ci_copy_bytes_to_smc(smumgr, smu_data->mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END); +} + +static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + table->SVI2Enable = 1; + else + table->SVI2Enable = 0; + return 0; +} + +static int ci_start_smc(struct pp_smumgr *smumgr) +{ + /* set smc instruct start point at 0x0 */ + ci_program_jump_on_start(smumgr); + + /* enable smc clock */ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + +int ci_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + u32 i; + + ci_initialize_power_tune_defaults(hwmgr); + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + ci_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (data->ulv_supported) { + result = ci_populate_ulv_state(hwmgr, &(table->Ulv)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = ci_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = ci_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + 
"Failed to initialize Memory Level!", return result); + + result = ci_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = ci_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = ci_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = ci_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = ci_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial state. */ + result = ci_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = ci_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + table->UvdBootLevel = 0; + table->VceBootLevel = 0; + table->AcpBootLevel = 0; + table->SamuBootLevel = 0; + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + result = ci_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = ci_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = ci_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->UVDInterval = 1; + table->VCEInterval = 1; + table->ACPInterval = 1; + table->SAMUInterval = 1; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->VddcVddciDelta = 4000; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + + table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; + table->PCIeGenInterval = 1; + + ci_populate_smc_svi2_config(hwmgr, table); + + for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++) + CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + table->AcDcGpio = 
SMU7_UNUSED_GPIO_PIN; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = ci_copy_bytes_to_smc(hwmgr->smumgr, smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + result = ci_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table!", return result); + + result = ci_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + + ci_start_smc(hwmgr->smumgr); + + return 0; +} + +int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + if (0 == ci_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + 
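+	/* Editor's note (added comment): the usT* thermal parameters appear to
+	 * be in 0.01 degC units; "(50 + T) / 100" below rounds them to whole
+	 * degrees for the SMC fan table.  slope1/slope2 above look like x16
+	 * fixed-point PWM-per-degree ratios rounded the same way.
+	 */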
fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = ci_copy_bytes_to_smc(hwmgr->smumgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + + return 0; +} + +static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return ci_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = ci_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, + LowSclkInterruptT), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = ci_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); + + result = ci_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t ci_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU7_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU7_SoftRegisters, AverageGraphicsA); + case PreVBlankGap: + return offsetof(SMU7_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU7_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case LowSclkInterruptThreshold: + return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); + } + } + pr_debug("can't get the offset of type %x member %x\n", type, member); + return 0; +} + +uint32_t ci_get_mac_definition(uint32_t 
value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU7_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU7_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU7_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU7_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU7_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDCI: + return SMU7_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU7_MAX_LEVELS_MVDD; + } + + pr_debug("can't get the mac of %x\n", value); + return 0; +} + +static int ci_load_smc_ucode(struct pp_smumgr *smumgr) +{ + uint32_t byte_count, start_addr; + uint8_t *src; + uint32_t data; + + struct cgs_firmware_info info = {0}; + + cgs_get_firmware_info(smumgr->device, CGS_UCODE_ID_SMU, &info); + + smumgr->is_kicker = info.is_kicker; + byte_count = info.image_size; + src = (uint8_t *)info.kptr; + start_addr = info.ucode_start_address; + + if (byte_count > SMC_RAM_END) { + pr_err("SMC address is beyond the SMC RAM area.\n"); + return -EINVAL; + } + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + for (; byte_count >= 4; byte_count -= 4) { + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + } + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + if (0 != byte_count) { + pr_err("SMC size must be dividable by 4\n"); + return -EINVAL; + } + + return 0; +} + +static int ci_upload_firmware(struct pp_hwmgr *hwmgr) +{ + if (ci_is_smc_ram_running(hwmgr->smumgr)) { + pr_info("smc is running, no need to load smc firmware\n"); + return 0; + } + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr->smumgr, SMC_IND, RCU_UC_EVENTS, + boot_seq_done, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL, + pre_fetcher_en, 1); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + return ci_load_smc_ucode(hwmgr->smumgr); +} + +int ci_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp = 0; + int result; + bool error = false; + + if (ci_upload_firmware(hwmgr)) + return -EINVAL; + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + ci_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->mc_reg_table_start = tmp; + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->fan_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + 
offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->arb_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? 1 : 0; +} + +static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? 
address : table->mc_reg_address[i].s1; + } + return 0; +} + +static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct ci_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct ci_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int ci_set_valid_flag(struct ci_mc_reg_table *table) +{ + uint8_t i, j; + + for (i = 0; i < 
table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1 << i); + break; + } + } + } + + return 0; +} + +int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + pp_atomctrl_mc_reg_table *table; + struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = ci_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = ci_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + ci_set_s0_mc_reg_index(ni_table); + result = ci_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + ci_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return 
ci_is_smc_ram_running(hwmgr->smumgr); +} + +int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *) + (hwmgr->smumgr->backend); + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpH = request->up_hyst; + levels[i].DownH = request->down_hyst; + } + + return ci_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h new file mode 100644 index 0000000000000..05b36b8009425 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h @@ -0,0 +1,52 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+#ifndef CI_SMC_H
+#define CI_SMC_H
+
+#include <linux/types.h>
+
+
+struct pp_smumgr;
+struct pp_hwmgr;
+struct amd_pp_profile;
+
+int ci_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+					uint16_t msg, uint32_t parameter);
+int ci_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
+int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int ci_init_smc_table(struct pp_hwmgr *hwmgr);
+int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t ci_get_offsetof(uint32_t type, uint32_t member);
+uint32_t ci_get_mac_definition(uint32_t value);
+int ci_process_firmware_header(struct pp_hwmgr *hwmgr);
+int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool ci_is_dpm_running(struct pp_hwmgr *hwmgr);
+int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+		struct amd_pp_profile *request);
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
new file mode 100644
index 0000000000000..62f6bdae66127
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#include +#include +#include +#include "linux/delay.h" + +#include "smumgr.h" +#include "ci_smumgr.h" +#include "cgs_common.h" +#include "ci_smc.h" + +static int ci_smu_init(struct pp_smumgr *smumgr) +{ + int i; + struct ci_smumgr *ci_priv = NULL; + + ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); + + if (ci_priv == NULL) + return -ENOMEM; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + ci_priv->activity_target[i] = 30; + + smumgr->backend = ci_priv; + + return 0; +} + +static int ci_smu_fini(struct pp_smumgr *smumgr) +{ + kfree(smumgr->backend); + smumgr->backend = NULL; + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static int ci_start_smu(struct pp_smumgr *smumgr) +{ + return 0; +} + +const struct pp_smumgr_func ci_smu_funcs = { + .smu_init = ci_smu_init, + .smu_fini = ci_smu_fini, + .start_smu = ci_start_smu, + .check_fw_load_finish = NULL, + .request_smu_load_fw = NULL, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = ci_send_msg_to_smc, + .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, + .get_offsetof = ci_get_offsetof, + .process_firmware_header = ci_process_firmware_header, + .init_smc_table = ci_init_smc_table, + .update_sclk_threshold = ci_update_sclk_threshold, + .thermal_setup_fan_table = ci_thermal_setup_fan_table, + .populate_all_graphic_levels = ci_populate_all_graphic_levels, + .populate_all_memory_levels = ci_populate_all_memory_levels, + .get_mac_definition = ci_get_mac_definition, + .initialize_mc_reg_table = ci_initialize_mc_reg_table, + .is_dpm_running = ci_is_dpm_running, + .populate_requested_graphic_levels = ci_populate_requested_graphic_levels, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h new file mode 100644 index 0000000000000..8189cfa17c465 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h @@ -0,0 +1,78 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _CI_SMUMANAGER_H_ +#define _CI_SMUMANAGER_H_ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 6 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +#include "smu7_discrete.h" +#include +#include "ppatomctrl.h" + +struct ci_pt_defaults { + u8 svi_load_line_en; + u8 svi_load_line_vddc; + u8 tdc_vddc_throttle_release_limit_perc; + u8 tdc_mawt; + u8 tdc_waterfall_ctl; + u8 dte_ambient_temp_base; + u32 display_cac; + u32 bapm_temp_gradient; + u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; + u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; +}; + +struct ci_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_mc_reg_table { + uint8_t last; + uint8_t num_entries; + uint16_t validflag; + struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_smumgr { + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint32_t ulv_setting_starts; + struct SMU7_Discrete_DpmTable smc_state_table; + struct SMU7_Discrete_PmFuses power_tune_table; + const struct ci_pt_defaults *power_tune_defaults; + SMU7_Discrete_MCRegisters mc_regs; + struct ci_mc_reg_table mc_reg_table; + uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS]; + +}; + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 4527c07bc6794..9c1738f991b66 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -65,6 +65,9 @@ int smum_early_init(struct pp_instance *handle) handle->smu_mgr = smumgr; switch (smumgr->chip_family) { + case AMDGPU_FAMILY_CI: + smumgr->smumgr_funcs = &ci_smu_funcs; + break; case AMDGPU_FAMILY_CZ: smumgr->smumgr_funcs = &cz_smu_funcs; break; From 2a527680a112d6b230961e3714d370a2e369bda9 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 14 Sep 2017 09:10:41 +0800 Subject: [PATCH 157/232] drm/amd/powerplay: fix set target TDP error on tonga/iceland ConfigurableTDP do not exist from Fiji. so only use in previous ASIC. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 0f75af33e581b..0fbaeb19a542a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -905,7 +905,6 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) if (0 == smc_result) { uint32_t default_limit = (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - data->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; @@ -976,10 +975,12 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 
hwmgr->platform_descriptor.TDPAdjustment : (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + + if (hwmgr->chip_id > CHIP_TONGA) + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + else + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100; + result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); } From 89c67699ad9436ab99e0daa91bf1cf05cd297bac Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 15 Sep 2017 11:09:20 +0800 Subject: [PATCH 158/232] drm/amd/powerplay: refine dmesg info under powerplay Use pr_debug to prevent spamming unimportant dmesg. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 6 +++--- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 49733c7817179..fa0305e79c4a1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1100,10 +1100,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, } } - PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, - "Can't find requested voltage id in vddc_dependency_on_sclk table!", + if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) { + pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n"); return -EINVAL; - ); + } get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 03075c6ac5127..264e98f4f9550 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1543,7 +1543,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) if (vddc >= 2000 || vddc == 0) return -EINVAL; } else { - pr_warn("failed to retrieving EVV voltage!\n"); + pr_debug("failed to retrieving EVV voltage!\n"); continue; } @@ -3026,11 +3026,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - pr_err("Single MCLK entry VDDCI/MCLK dependency table " + pr_debug("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if (dep_mclk_table->entries[0].vddci != data->vbios_boot_state.vddci_bootup_value) - pr_err("Single VDDCI entry VDDCI/MCLK dependency table " + pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } @@ -3174,11 +3174,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - pr_err("Single MCLK entry VDDCI/MCLK dependency table " + pr_debug("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if (dep_mclk_table->entries[0].v != data->vbios_boot_state.vddci_bootup_value) - pr_err("Single VDDCI entry VDDCI/MCLK 
dependency table " + pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } From 970d9804b00d41e7c047477d036d451fcf25815e Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 14 Sep 2017 21:14:59 +0800 Subject: [PATCH 159/232] drm/amd/powerplay: Add support functions for CI to ppatomctrl.c Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 80 +++++++++++++++++++ .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 6 ++ 2 files changed, 86 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index fa0305e79c4a1..a129bc5b18442 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1418,3 +1418,83 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, return 0; } + +int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) +{ + int result; + SET_VOLTAGE_PS_ALLOCATION allocation; + SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = + (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; + + voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; + + result = cgs_atom_exec_cmd_table(hwmgr->device, + GetIndexIntoMasterTable(COMMAND, SetVoltage), + voltage_parameters); + + *virtual_voltage_id = voltage_parameters->usVoltageLevel; + + return result; +} + +int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, + uint16_t *vddc, uint16_t *vddci, + uint16_t virtual_voltage_id, + uint16_t efuse_voltage_id) +{ + int i, j; + int ix; + u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; + ATOM_ASIC_PROFILING_INFO_V2_1 *profile; + + *vddc = 0; + *vddci = 0; + + ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); + + profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) + cgs_atom_get_data_table(hwmgr->device, + ix, + NULL, NULL, NULL); + if (!profile) + return -EINVAL; + + if ((profile->asHeader.ucTableFormatRevision >= 2) && + (profile->asHeader.ucTableContentRevision >= 1) && + (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) { + leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset); + vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset); + vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset); + if (profile->ucElbVDDC_Num > 0) { + for (i = 0; i < profile->ucElbVDDC_Num; i++) { + if (vddc_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (efuse_voltage_id <= leakage_bin[j]) { + *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + + vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset); + vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset); + if (profile->ucElbVDDCI_Num > 0) { + for (i = 0; i < profile->ucElbVDDCI_Num; i++) { + if (vddci_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (efuse_voltage_id <= leakage_bin[j]) { + *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index 8d4188ad941af..c44a92064cf1b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -314,5 +314,11 @@ 
extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, uint16_t *load_line); + +extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, + uint16_t *vddc, uint16_t *vddci, + uint16_t virtual_voltage_id, + uint16_t efuse_voltage_id); +extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id); #endif From 86457c3b21cbde1e5df45a8e11e173414e3dfc31 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 14 Sep 2017 21:05:18 +0800 Subject: [PATCH 160/232] drm/amd/powerplay: Add support for CI asics to hwmgr Add support for CI asics (Bonaire, Hawaii) to the powerplay hwmgr Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 57 +++++---- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 118 ++++++++++++++---- 2 files changed, 128 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index e3bf69c924fd7..8770860de644b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -44,6 +44,7 @@ static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr); static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr); uint8_t convert_to_vid(uint16_t vddc) { @@ -76,6 +77,13 @@ int hwmgr_early_init(struct pp_instance *handle) hwmgr->fan_ctrl_is_in_default_mode = true; switch (hwmgr->chip_family) { + case AMDGPU_FAMILY_CI: + ci_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); + hwmgr->pp_table_version = PP_TABLE_V0; + smu7_init_function_pointers(hwmgr); + break; case AMDGPU_FAMILY_CZ: cz_init_function_pointers(hwmgr); break; @@ -748,28 +756,8 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_PCIEPerformanceRequest); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); @@ -794,7 +782,6 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM); - return; } @@ -843,7 +830,8 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) { - + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -869,6 +857,8 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -877,12 +867,13 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TDRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - return 0; } int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -896,11 +887,25 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDPowerGating); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating); - return 0; } int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + return 0; +} + +int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); @@ -911,6 +916,8 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EVV); + PHM_PlatformCaps_MemorySpreadSpectrumSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 264e98f4f9550..8fe9d8738ed72 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "pp_acpi.h" #include "ppatomctrl.h" #include "atombios.h" @@ -607,13 
+608,20 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) data->dpm_table.pcie_speed_table.count = 6; } /* Populate last level for boot PCIE level, but do not increment count. */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + data->vbios_boot_state.pcie_lane_bootup_value); + } else { + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, data->dpm_table.pcie_speed_table.count, get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - + } return 0; } @@ -689,7 +697,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) allowed_vdd_sclk_table->entries[i].clk) { data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0; data->dpm_table.sclk_table.count++; } } @@ -703,7 +711,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) allowed_vdd_mclk_table->entries[i].clk) { data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0; data->dpm_table.mclk_table.count++; } } @@ -963,13 +971,24 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); - udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005); + } else { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + } } return 0; @@ -998,6 
+1017,10 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + if (hwmgr->chip_family == AMDGPU_FAMILY_CI) + cgs_write_register(hwmgr->device, 0x1488, + (cgs_read_register(hwmgr->device, 0x1488) & ~0x1)); + if (smu7_enable_sclk_mclk_dpm(hwmgr)) { pr_err("Failed to enable Sclk DPM and Mclk DPM!"); return -EINVAL; @@ -1389,12 +1412,29 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) &tmp3); tmp3 = (tmp3 >> 5) & 0x3; data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; + } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { + data->vddc_phase_shed_control = 1; + } else { + data->vddc_phase_shed_control = 0; + } + + if (hwmgr->chip_id == CHIP_HAWAII) { + data->thermal_temp_setting.temperature_low = 94500; + data->thermal_temp_setting.temperature_high = 95000; + data->thermal_temp_setting.temperature_shutdown = 104000; + } else { + data->thermal_temp_setting.temperature_low = 99500; + data->thermal_temp_setting.temperature_high = 100000; + data->thermal_temp_setting.temperature_shutdown = 104000; } data->fast_watermark_threshold = 100; if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) + data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDGFX)) { @@ -1414,10 +1454,9 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; } - if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { + if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDGFX); - } if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ControlVDDCI)) { @@ -2274,7 +2313,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; } - if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; return 0; @@ -2290,10 +2329,38 @@ static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) return 0; } +static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) +{ + uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int i; + + if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { + for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, + virtual_voltage_id, + efuse_voltage_id) == 0) { + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + if (vddci != 0 && vddci != virtual_voltage_id) { + 
data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; + data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; + data->vddci_leakage.count++; + } + } + } + return 0; +} + static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data; - int result; + int result = 0; data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); if (data == NULL) @@ -2304,11 +2371,15 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) smu7_init_dpm_defaults(hwmgr); /* Get leakage voltage based on leakage ID. */ - result = smu7_get_evv_voltages(hwmgr); - - if (result) { - pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); - return -EINVAL; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV)) { + result = smu7_get_evv_voltages(hwmgr); + if (result) { + pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); + return -EINVAL; + } + } else { + smu7_get_elb_voltages(hwmgr); } if (hwmgr->pp_table_version == PP_TABLE_V1) { @@ -3777,11 +3848,14 @@ static int smu7_notify_link_speed_change_after_state_change( static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int ret = 0; - if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); - return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + ret = (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + } + return ret; } static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) From 780cffc599b640f1ea1ab051496ad1fed4532150 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 12 Sep 2017 13:37:40 +0800 Subject: [PATCH 161/232] drm/amdgpu: add powerplay support for CI asics Currently, CI asics use dpm by default (amdgpu.dpm=-1). When amdgpu.dpm=1 is set, powerplay is enabled. When amdgpu.dpm=0 is set, both dpm and powerplay are disabled. Once powerplay is stable on CI asics, ci_dpm will be removed.
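For illustration only (based on the description above, not part of the patch), the behaviour maps to the existing amdgpu.dpm module parameter roughly as follows, e.g. on the kernel command line:

    amdgpu.dpm=-1   # default on CI: keep the legacy ci_dpm code path
    amdgpu.dpm=1    # route Bonaire/Hawaii power management through powerplay
    amdgpu.dpm=0    # disable both dpm and powerplay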
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 5cd5b8ee9744b..2d2f0960b0258 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -93,8 +93,16 @@ static int amdgpu_pp_early_init(void *handle) #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: - amd_pp->ip_funcs = &ci_dpm_ip_funcs; - amd_pp->pp_funcs = &ci_dpm_funcs; + if (amdgpu_dpm == -1) { + amd_pp->ip_funcs = &ci_dpm_ip_funcs; + amd_pp->pp_funcs = &ci_dpm_funcs; + } else { + adev->pp_enabled = true; + if (amdgpu_create_pp_handle(adev)) + return -EINVAL; + amd_pp->ip_funcs = &pp_ip_funcs; + amd_pp->pp_funcs = &pp_dpm_funcs; + } break; case CHIP_KABINI: case CHIP_MULLINS: From 0596df6b09cf652844eb08c917da94984177846b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 15 Sep 2017 16:30:52 +0800 Subject: [PATCH 162/232] drm/amd/powerplay: Simplify smu7_voting_clients() Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 59 ++++++------------- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 9 +-- 2 files changed, 19 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 8fe9d8738ed72..4c603e53a171a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -388,6 +388,7 @@ static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr) static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int i; /* Clear reset for voting clients before enabling DPM */ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -395,50 +396,26 @@ static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - + for (i = 0; i < 8; i++) + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0 + i * 4, + data->voting_rights_clients[i]); return 0; } static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) { + int i; + /* Reset voting clients before disabling DPM */ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, 
CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); + for (i = 0; i < 8; i++) + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0); return 0; } @@ -1384,14 +1361,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->vddc_vddgfx_delta = 300; data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; - data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; + data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7; data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f221e17b67e78..e021154aedbdf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -182,14 +182,7 @@ struct smu7_hwmgr { struct smu7_dpm_table dpm_table; struct smu7_dpm_table golden_dpm_table; - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; + uint32_t voting_rights_clients[8]; uint32_t static_screen_threshold_unit; uint32_t static_screen_threshold; uint32_t voltage_control; From ca290da8f6345c8e8e180256fbe092c751fa9654 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 25 Aug 2017 20:15:04 -0400 Subject: [PATCH 163/232] drm/amdgpu: Fix error handling in amdgpu_vm_init Make sure vm->root.bo is not left reserved if amdgpu_bo_kmap fails. 
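In other words, the fix reorders the unreserve against the error check so the reservation is dropped on both the success and the failure path. A condensed view of the resulting code (paraphrasing the one-line move in the diff below):

    r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
    amdgpu_bo_unreserve(vm->root.base.bo);  /* always drop the reservation */
    if (r)
        goto error_free_root;               /* bo is no longer reserved here */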
Signed-off-by: Felix Kuehling Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2df254cc802e2..2196bca7331c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2615,9 +2615,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; r = amdgpu_bo_kmap(vm->root.base.bo, NULL); + amdgpu_bo_unreserve(vm->root.base.bo); if (r) goto error_free_root; - amdgpu_bo_unreserve(vm->root.base.bo); } return 0; From 02208441cc3a5110191996bb129db39ff10e7395 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 25 Aug 2017 20:40:26 -0400 Subject: [PATCH 164/232] drm/amdgpu: Add PASID management Allows assigning a PASID to a VM for identifying VMs involved in page faults. The global PASID manager is also exported in the KFD interface so that AMDGPU and KFD can share the PASID space. PASIDs of different sizes can be requested. On APUs, the PASID size is deterined by the capabilities of the IOMMU. So KFD must be able to allocate PASIDs in a smaller range. Signed-off-by: Felix Kuehling Acked-by: Alex Deucher Reviewed-by: Oded Gabbay Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 75 ++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14 +++- .../gpu/drm/amd/include/kgd_kfd_interface.h | 6 ++ 6 files changed, 97 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b9dbbf9cb8b07..dc7e25cce7412 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_vm_alloc_pasid, + .free_pasid = amdgpu_vm_free_pasid, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 309f2419c6d8a..c678c69936a0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_vm_alloc_pasid, + .free_pasid = amdgpu_vm_free_pasid, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e16229000a983..79d9ab43d42c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } r = amdgpu_vm_init(adev, &fpriv->vm, - AMDGPU_VM_CONTEXT_GFX); + AMDGPU_VM_CONTEXT_GFX, 0); if (r) { 
kfree(fpriv); goto out_suspend; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2196bca7331c2..9b795915cab1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -27,11 +27,58 @@ */ #include #include +#include #include #include #include "amdgpu.h" #include "amdgpu_trace.h" +/* + * PASID manager + * + * PASIDs are global address space identifiers that can be shared + * between the GPU, an IOMMU and the driver. VMs on different devices + * may use the same PASID if they share the same address + * space. Therefore PASIDs are allocated using a global IDA. VMs are + * looked up from the PASID per amdgpu_device. + */ +static DEFINE_IDA(amdgpu_vm_pasid_ida); + +/** + * amdgpu_vm_alloc_pasid - Allocate a PASID + * @bits: Maximum width of the PASID in bits, must be at least 1 + * + * Allocates a PASID of the given width while keeping smaller PASIDs + * available if possible. + * + * Returns a positive integer on success. Returns %-EINVAL if bits==0. + * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on + * memory allocation failure. + */ +int amdgpu_vm_alloc_pasid(unsigned int bits) +{ + int pasid = -EINVAL; + + for (bits = min(bits, 31U); bits > 0; bits--) { + pasid = ida_simple_get(&amdgpu_vm_pasid_ida, + 1U << (bits - 1), 1U << bits, + GFP_KERNEL); + if (pasid != -ENOSPC) + break; + } + + return pasid; +} + +/** + * amdgpu_vm_free_pasid - Free a PASID + * @pasid: PASID to free + */ +void amdgpu_vm_free_pasid(unsigned int pasid) +{ + ida_simple_remove(&amdgpu_vm_pasid_ida, pasid); +} + /* * GPUVM * GPUVM is similar to the legacy gart on older asics, however @@ -2539,7 +2586,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_ * Init @vm fields. 
*/ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context) + int vm_context, unsigned int pasid) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); @@ -2620,6 +2667,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; } + if (pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, + GFP_ATOMIC); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + if (r < 0) + goto error_free_root; + + vm->pasid = pasid; + } + return 0; error_free_root: @@ -2673,6 +2733,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; int i; + if (vm->pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + } + amd_sched_entity_fini(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va)) { @@ -2752,6 +2820,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) adev->vm_manager.vm_update_mode = 0; #endif + idr_init(&adev->vm_manager.pasid_idr); + spin_lock_init(&adev->vm_manager.pasid_lock); } /** @@ -2765,6 +2835,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) { unsigned i, j; + WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); + idr_destroy(&adev->vm_manager.pasid_idr); + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 48c58ae4bb3a1..7873dfa8c0f95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -25,6 +25,7 @@ #define __AMDGPU_VM_H__ #include +#include #include "gpu_scheduler.h" #include "amdgpu_sync.h" @@ -148,8 +149,9 @@ struct amdgpu_vm { /* Scheduler entity for page table updates */ struct amd_sched_entity entity; - /* client id */ + /* client id and PASID (TODO: replace client_id with PASID) */ u64 client_id; + unsigned int pasid; /* dedicated to vm */ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; @@ -220,12 +222,20 @@ struct amdgpu_vm_manager { * BIT1[= 0] Compute updated by SDMA [= 1] by CPU */ int vm_update_mode; + + /* PASID to VM mapping, will be used in interrupt context to + * look up VM of a page fault + */ + struct idr pasid_idr; + spinlock_t pasid_lock; }; +int amdgpu_vm_alloc_pasid(unsigned int bits); +void amdgpu_vm_free_pasid(unsigned int pasid); void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context); + int vm_context, unsigned int pasid); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 94277cb734d2f..f516fd10e6ba7 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -112,6 +112,9 @@ struct tile_config { * * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz * + * @alloc_pasid: Allocate a PASID + * @free_pasid: Free a PASID + * * @program_sh_mem_settings: A function that 
should initiate the memory * properties such as main aperture memory type (cache / non cached) and * secondary aperture base address, size and memory type. @@ -160,6 +163,9 @@ struct kfd2kgd_calls { uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd); + int (*alloc_pasid)(unsigned int bits); + void (*free_pasid)(unsigned int pasid); + /* Register access functions */ void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, From f4d6229b9db66c6d8fcd5157b4bcc701c099e3e2 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 25 Aug 2017 21:30:18 -0400 Subject: [PATCH 165/232] drm/radeon: Add PASID manager for KFD Signed-off-by: Felix Kuehling Acked-by: Alex Deucher Reviewed-by: Oded Gabbay Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_kfd.c | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index f6578c96925c9..a2ac8ac0930dd 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -58,6 +58,10 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd); static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); + +static int alloc_pasid(unsigned int bits); +static void free_pasid(unsigned int pasid); + static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); /* @@ -112,6 +116,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_vmem_size = get_vmem_size, .get_gpu_clock_counter = get_gpu_clock_counter, .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = alloc_pasid, + .free_pasid = free_pasid, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_pipeline = kgd_init_pipeline, @@ -341,6 +347,31 @@ static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100; } +/* + * PASID manager + */ +static DEFINE_IDA(pasid_ida); + +int alloc_pasid(unsigned int bits) +{ + int pasid = -EINVAL; + + for (bits = min(bits, 31U); bits > 0; bits--) { + pasid = ida_simple_get(&pasid_ida, + 1U << (bits - 1), 1U << bits, + GFP_KERNEL); + if (pasid != -ENOSPC) + break; + } + + return pasid; +} + +void free_pasid(unsigned int pasid) +{ + ida_simple_remove(&pasid_ida, pasid); +} + static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd) { return (struct radeon_device *)kgd; From a91e70e30c3e1aaf90ddb851f9736367bc885fb8 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 26 Aug 2017 02:00:57 -0400 Subject: [PATCH 166/232] drm/amdkfd: Separate doorbell allocation from PASID PASID management is moving into KGD. Limiting the PASID range to the number of doorbell pages is no longer practical. 
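Condensed, the new scheme introduced below gives each process its own doorbell slice index from a dedicated IDA instead of reusing its PASID, and derives per-queue doorbell offsets from that index. An abridged sketch of the helper added by this patch (doorbell_ida and max_doorbell_slices are file-scope state in kfd_doorbell.c):

    static DEFINE_IDA(doorbell_ida);

    int kfd_alloc_process_doorbells(struct kfd_process *process)
    {
        /* index 0 is the doorbell page reserved for kernel queues */
        int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
                               GFP_KERNEL);

        if (r > 0)
            process->doorbell_index = r;
        return r;
    }

and in kfd_queue_id_to_doorbell() the per-queue offset becomes:

    kfd->doorbell_id_offset
        + process->doorbell_index * doorbell_process_allocation() / sizeof(u32)
        + queue_id;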
Signed-off-by: Felix Kuehling Acked-by: Alex Deucher Reviewed-by: Oded Gabbay Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 ---- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 50 +++++++++++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 10 ++--- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 +++ 4 files changed, 45 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 61fff25b4ce7d..5df12b2872018 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -168,13 +168,6 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd) pasid_limit = min_t(unsigned int, (unsigned int)(1 << kfd->device_info->max_pasid_bits), iommu_info.max_pasids); - /* - * last pasid is used for kernel queues doorbells - * in the future the last pasid might be used for a kernel thread. - */ - pasid_limit = min_t(unsigned int, - pasid_limit, - kfd->doorbell_process_limit - 1); err = amd_iommu_init_device(kfd->pdev, pasid_limit); if (err < 0) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index acf4d2a977adf..feb76c235b1a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -24,16 +24,15 @@ #include #include #include +#include /* - * This extension supports a kernel level doorbells management for - * the kernel queues. - * Basically the last doorbells page is devoted to kernel queues - * and that's assures that any user process won't get access to the - * kernel doorbells page + * This extension supports a kernel level doorbells management for the + * kernel queues using the first doorbell page reserved for the kernel. */ -#define KERNEL_DOORBELL_PASID 1 +static DEFINE_IDA(doorbell_ida); +static unsigned int max_doorbell_slices; #define KFD_SIZE_OF_DOORBELL_IN_BYTES 4 /* @@ -84,13 +83,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd) (doorbell_aperture_size - doorbell_start_offset) / doorbell_process_allocation(); else - doorbell_process_limit = 0; + return -ENOSPC; + + if (!max_doorbell_slices || + doorbell_process_limit < max_doorbell_slices) + max_doorbell_slices = doorbell_process_limit; kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address + doorbell_start_offset; kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32); - kfd->doorbell_process_limit = doorbell_process_limit - 1; kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, doorbell_process_allocation()); @@ -185,11 +187,10 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, return NULL; /* - * Calculating the kernel doorbell offset using "faked" kernel - * pasid that allocated for kernel queues only + * Calculating the kernel doorbell offset using the first + * doorbell page. */ - *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() / - sizeof(u32)) + inx; + *doorbell_off = kfd->doorbell_id_offset + inx; pr_debug("Get kernel queue doorbell\n" " doorbell offset == 0x%08X\n" @@ -228,11 +229,12 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, { /* * doorbell_id_offset accounts for doorbells taken by KGD. - * pasid * doorbell_process_allocation/sizeof(u32) adjusts - * to the process's doorbells + * index * doorbell_process_allocation/sizeof(u32) adjusts to + * the process's doorbells. 
*/ return kfd->doorbell_id_offset + - process->pasid * (doorbell_process_allocation()/sizeof(u32)) + + process->doorbell_index + * doorbell_process_allocation() / sizeof(u32) + queue_id; } @@ -250,5 +252,21 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, struct kfd_process *process) { return dev->doorbell_base + - process->pasid * doorbell_process_allocation(); + process->doorbell_index * doorbell_process_allocation(); +} + +int kfd_alloc_process_doorbells(struct kfd_process *process) +{ + int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices, + GFP_KERNEL); + if (r > 0) + process->doorbell_index = r; + + return r; +} + +void kfd_free_process_doorbells(struct kfd_process *process) +{ + if (process->doorbell_index) + ida_simple_remove(&doorbell_ida, process->doorbell_index); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b397ec726400c..4cb90f5179062 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -157,9 +157,6 @@ struct kfd_dev { * to HW doorbell, GFX reserved some * at the start) */ - size_t doorbell_process_limit; /* Number of processes we have doorbell - * space for. - */ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells * page used by kernel queue */ @@ -495,6 +492,7 @@ struct kfd_process { struct rcu_head rcu; unsigned int pasid; + unsigned int doorbell_index; /* * List of kfd_process_device structures, @@ -583,6 +581,10 @@ void write_kernel_doorbell(u32 __iomem *db, u32 value); unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, struct kfd_process *process, unsigned int queue_id); +phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, + struct kfd_process *process); +int kfd_alloc_process_doorbells(struct kfd_process *process); +void kfd_free_process_doorbells(struct kfd_process *process); /* GTT Sub-Allocator */ @@ -694,8 +696,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, void pm_release_ib(struct packet_manager *pm); uint64_t kfd_get_number_elems(struct kfd_dev *kfd); -phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, - struct kfd_process *process); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index c74cf22a1ed9d..9e65ce3c1967f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -183,6 +183,7 @@ static void kfd_process_wq_release(struct work_struct *work) kfd_event_free_process(p); kfd_pasid_free(p->pasid); + kfd_free_process_doorbells(p); mutex_unlock(&p->mutex); @@ -288,6 +289,9 @@ static struct kfd_process *create_process(const struct task_struct *thread) if (process->pasid == 0) goto err_alloc_pasid; + if (kfd_alloc_process_doorbells(process) < 0) + goto err_alloc_doorbells; + mutex_init(&process->mutex); process->mm = thread->mm; @@ -329,6 +333,8 @@ static struct kfd_process *create_process(const struct task_struct *thread) mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); err_mmu_notifier: mutex_destroy(&process->mutex); + kfd_free_process_doorbells(process); +err_alloc_doorbells: kfd_pasid_free(process->pasid); err_alloc_pasid: kfree(process->queues); From d2791c45636927986f732482cbba4ed7c758a115 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 26 Aug 2017 02:10:12 -0400 Subject: [PATCH 167/232] drm/amdkfd: Use PASID manager from KGD Signed-off-by: Felix 
Kuehling Acked-by: Alex Deucher Reviewed-by: Oded Gabbay Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 6 -- drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 90 +++++++++++-------------- 2 files changed, 38 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 0d73bea22c450..6c5a9cab55ded 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -103,10 +103,6 @@ static int __init kfd_module_init(void) return -1; } - err = kfd_pasid_init(); - if (err < 0) - return err; - err = kfd_chardev_init(); if (err < 0) goto err_ioctl; @@ -126,7 +122,6 @@ static int __init kfd_module_init(void) err_topology: kfd_chardev_exit(); err_ioctl: - kfd_pasid_exit(); return err; } @@ -137,7 +132,6 @@ static void __exit kfd_module_exit(void) kfd_process_destroy_wq(); kfd_topology_shutdown(); kfd_chardev_exit(); - kfd_pasid_exit(); dev_info(kfd_device, "Removed module\n"); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 1e06de0bc6739..d6a796144269d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -20,78 +20,64 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include #include #include "kfd_priv.h" -static unsigned long *pasid_bitmap; -static unsigned int pasid_limit; -static DEFINE_MUTEX(pasid_mutex); - -int kfd_pasid_init(void) -{ - pasid_limit = KFD_MAX_NUM_OF_PROCESSES; - - pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), - GFP_KERNEL); - if (!pasid_bitmap) - return -ENOMEM; - - set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */ - - return 0; -} - -void kfd_pasid_exit(void) -{ - kfree(pasid_bitmap); -} +static unsigned int pasid_bits = 16; +static const struct kfd2kgd_calls *kfd2kgd; bool kfd_set_pasid_limit(unsigned int new_limit) { - if (new_limit < pasid_limit) { - bool ok; - - mutex_lock(&pasid_mutex); - - /* ensure that no pasids >= new_limit are in-use */ - ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) == - pasid_limit); - if (ok) - pasid_limit = new_limit; - - mutex_unlock(&pasid_mutex); - - return ok; + if (new_limit < 2) + return false; + + if (new_limit < (1U << pasid_bits)) { + if (kfd2kgd) + /* We've already allocated user PASIDs, too late to + * change the limit + */ + return false; + + while (new_limit < (1U << pasid_bits)) + pasid_bits--; } return true; } -inline unsigned int kfd_get_pasid_limit(void) +unsigned int kfd_get_pasid_limit(void) { - return pasid_limit; + return 1U << pasid_bits; } unsigned int kfd_pasid_alloc(void) { - unsigned int found; - - mutex_lock(&pasid_mutex); - - found = find_first_zero_bit(pasid_bitmap, pasid_limit); - if (found == pasid_limit) - found = 0; - else - set_bit(found, pasid_bitmap); + int r; + + /* Find the first best KFD device for calling KGD */ + if (!kfd2kgd) { + struct kfd_dev *dev = NULL; + unsigned int i = 0; + + while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) { + if (dev && dev->kfd2kgd) { + kfd2kgd = dev->kfd2kgd; + break; + } + i++; + } + + if (!kfd2kgd) + return false; + } - mutex_unlock(&pasid_mutex); + r = kfd2kgd->alloc_pasid(pasid_bits); - return found; + return r > 0 ? 
r : 0; } void kfd_pasid_free(unsigned int pasid) { - if (!WARN_ON(pasid == 0 || pasid >= pasid_limit)) - clear_bit(pasid, pasid_bitmap); + if (kfd2kgd) + kfd2kgd->free_pasid(pasid); } From 00ecd8a27c03b6dd463ab8755dd6d58751d76297 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 26 Aug 2017 02:40:45 -0400 Subject: [PATCH 168/232] drm/amdgpu: Add prescreening stage in IH processing (v2) To filter out high-frequency interrupts that can be safely ignored. v2: squash in trivial typo fix for si (Alex) Signed-off-by: Felix Kuehling Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/cik_ih.c | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/si_ih.c | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 14 ++++++++++++++ 8 files changed, 92 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 802fdc11944b3..d62a35ef26be0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -334,6 +334,7 @@ struct amdgpu_gart_funcs { struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ u32 (*get_wptr)(struct amdgpu_device *adev); + bool (*prescreen_iv)(struct amdgpu_device *adev); void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); void (*set_rptr)(struct amdgpu_device *adev); @@ -1749,6 +1750,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) +#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 3ab4c65ecc8b4..c834a40cfad6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -169,6 +169,12 @@ int amdgpu_ih_process(struct amdgpu_device *adev) while (adev->irq.ih.rptr != wptr) { u32 ring_index = adev->irq.ih.rptr >> 2; + /* Prescreening of high-frequency interrupts */ + if (!amdgpu_ih_prescreen_iv(adev)) { + adev->irq.ih.rptr &= adev->irq.ih.ptr_mask; + continue; + } + /* Before dispatching irq to IP blocks, send it to amdkfd */ amdgpu_amdkfd_interrupt(adev, (const void *) &adev->irq.ih.ring[ring_index]); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index b8918432c5722..07d3d895da108 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -228,6 +228,19 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) * [127:96] - reserved */ +/** + * cik_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. 
+ */ +static bool cik_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* Process all interrupts */ + return true; +} + /** * cik_ih_decode_iv - decode an interrupt vector * @@ -433,6 +446,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { static const struct amdgpu_ih_funcs cik_ih_funcs = { .get_wptr = cik_ih_get_wptr, + .prescreen_iv = cik_ih_prescreen_iv, .decode_iv = cik_ih_decode_iv, .set_rptr = cik_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 0c1209cdd1cb8..b6cdf4afaf465 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -207,6 +207,19 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } +/** + * cz_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. + */ +static bool cz_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* Process all interrupts */ + return true; +} + /** * cz_ih_decode_iv - decode an interrupt vector * @@ -414,6 +427,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { static const struct amdgpu_ih_funcs cz_ih_funcs = { .get_wptr = cz_ih_get_wptr, + .prescreen_iv = cz_ih_prescreen_iv, .decode_iv = cz_ih_decode_iv, .set_rptr = cz_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 7a0ea27ac4295..65ed6d3a8f05f 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -207,6 +207,19 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } +/** + * iceland_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. + */ +static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* Process all interrupts */ + return true; +} + /** * iceland_ih_decode_iv - decode an interrupt vector * @@ -412,6 +425,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { static const struct amdgpu_ih_funcs iceland_ih_funcs = { .get_wptr = iceland_ih_get_wptr, + .prescreen_iv = iceland_ih_prescreen_iv, .decode_iv = iceland_ih_decode_iv, .set_rptr = iceland_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index ce25e03a077da..d2c6b80309c8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -118,6 +118,19 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } +/** + * si_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. 
+ */ +static bool si_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* Process all interrupts */ + return true; +} + static void si_ih_decode_iv(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { @@ -288,6 +301,7 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { static const struct amdgpu_ih_funcs si_ih_funcs = { .get_wptr = si_ih_get_wptr, + .prescreen_iv = si_ih_prescreen_iv, .decode_iv = si_ih_decode_iv, .set_rptr = si_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 923df2c0e5352..5ed00692618e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -218,6 +218,19 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } +/** + * tonga_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. + */ +static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* Process all interrupts */ + return true; +} + /** * tonga_ih_decode_iv - decode an interrupt vector * @@ -478,6 +491,7 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { static const struct amdgpu_ih_funcs tonga_ih_funcs = { .get_wptr = tonga_ih_get_wptr, + .prescreen_iv = tonga_ih_prescreen_iv, .decode_iv = tonga_ih_decode_iv, .set_rptr = tonga_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 56150e8d1ed21..eda4771e273f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -226,6 +226,19 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } +/** + * vega10_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. + */ +static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev) +{ + /* TODO: Filter known pending page faults */ + return true; +} + /** * vega10_ih_decode_iv - decode an interrupt vector * @@ -410,6 +423,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, + .prescreen_iv = vega10_ih_prescreen_iv, .decode_iv = vega10_ih_decode_iv, .set_rptr = vega10_ih_set_rptr }; From 5d86b2c391965cbcb295e8fa795276977b2a416e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 24 Aug 2017 19:22:32 -0400 Subject: [PATCH 169/232] drm/amd: Closed hash table with low overhead (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a statically sized closed hash table implementation with low memory and CPU overhead. The API is inspired by kfifo. Storing, retrieving and deleting data does not involve any dynamic memory management, which makes it ideal for use in interrupt context. Static memory usage per entry comprises a 32 or 64 bit hash key, two bits for occupancy tracking and the value size stored in the table. No list heads or pointers are needed. Therefore this data structure should be quite cache-friendly, too. It uses linear probing and lazy deletion. During lookups free space is reclaimed and entries relocated to speed up future lookups. 
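For orientation, a minimal usage sketch of the API declared in chash.h below; the table name, its geometry (2^7 slots, 8-byte keys, 4-byte values) and the wrapper function are made up for this example:

    #include <linux/chash.h>	/* resolved via the amd/include path added below */

    static DEFINE_CHASH_TABLE(example_table, 7, 8, 4);

    static void chash_example(u64 key)
    {
        u32 in = 42, out;

        /* insert or update; returns 1 if the key existed, 0 if a new
         * entry was added, -ENOMEM if the table is full */
        if (chash_table_copy_in(&example_table, key, &in) < 0)
            return;

        /* look up; returns the slot index, or -EINVAL if not found */
        if (chash_table_copy_out(&example_table, key, &out) >= 0)
            pr_debug("chash: key %llu -> %u\n", key, out);

        /* remove the entry, optionally copying its value out */
        chash_table_remove(&example_table, key, NULL);
    }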
v2: squash in do_div and _BITOPS_LONG_SHIFT fixes Signed-off-by: Felix Kuehling Acked-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/amd/include/linux/chash.h | 366 +++++++++++++ drivers/gpu/drm/amd/lib/Kconfig | 27 + drivers/gpu/drm/amd/lib/Makefile | 11 + drivers/gpu/drm/amd/lib/chash.c | 638 ++++++++++++++++++++++ 6 files changed, 1045 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/linux/chash.h create mode 100644 drivers/gpu/drm/amd/lib/Kconfig create mode 100644 drivers/gpu/drm/amd/lib/Makefile create mode 100644 drivers/gpu/drm/amd/lib/chash.c diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 83cb2a88c204f..1989c276138cc 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -191,6 +191,8 @@ config DRM_AMDGPU source "drivers/gpu/drm/amd/amdgpu/Kconfig" +source "drivers/gpu/drm/amd/lib/Kconfig" + source "drivers/gpu/drm/nouveau/Kconfig" source "drivers/gpu/drm/i915/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a8acc197dec37..4cac997bb8df4 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_DRM_ARM) += arm/ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ +obj-y += amd/lib/ obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h new file mode 100644 index 0000000000000..6dc159924ed10 --- /dev/null +++ b/drivers/gpu/drm/amd/include/linux/chash.h @@ -0,0 +1,366 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _LINUX_CHASH_H +#define _LINUX_CHASH_H + +#include +#include +#include +#include + +#if BITS_PER_LONG == 32 +# define _CHASH_LONG_SHIFT 5 +#elif BITS_PER_LONG == 64 +# define _CHASH_LONG_SHIFT 6 +#else +# error "Unexpected BITS_PER_LONG" +#endif + +struct __chash_table { + u8 bits; + u8 key_size; + unsigned int value_size; + u32 size_mask; + unsigned long *occup_bitmap, *valid_bitmap; + union { + u32 *keys32; + u64 *keys64; + }; + u8 *values; + +#ifdef CONFIG_CHASH_STATS + u64 hits, hits_steps, hits_time_ns; + u64 miss, miss_steps, miss_time_ns; + u64 relocs, reloc_dist; +#endif +}; + +#define __CHASH_BITMAP_SIZE(bits) \ + (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG) +#define __CHASH_ARRAY_SIZE(bits, size) \ + ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long)) + +#define __CHASH_DATA_SIZE(bits, key_size, value_size) \ + (__CHASH_BITMAP_SIZE(bits) * 2 + \ + __CHASH_ARRAY_SIZE(bits, key_size) + \ + __CHASH_ARRAY_SIZE(bits, value_size)) + +#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \ + struct { \ + struct __chash_table table; \ + unsigned long data \ + [__CHASH_DATA_SIZE(bits, key_size, value_size)];\ + } + +/** + * struct chash_table - Dynamically allocated closed hash table + * + * Use this struct for dynamically allocated hash tables (using + * chash_table_alloc and chash_table_free), where the size is + * determined at runtime. + */ +struct chash_table { + struct __chash_table table; + unsigned long *data; +}; + +/** + * DECLARE_CHASH_TABLE - macro to declare a closed hash table + * @table: name of the declared hash table + * @bts: Table size will be 2^bits entries + * @key_sz: Size of hash keys in bytes, 4 or 8 + * @val_sz: Size of data values in bytes, can be 0 + * + * This declares the hash table variable with a static size. + * + * The closed hash table stores key-value pairs with low memory and + * lookup overhead. In operation it performs no dynamic memory + * management. The data being stored does not require any + * list_heads. The hash table performs best with small @val_sz and as + * long as some space (about 50%) is left free in the table. But the + * table can still work reasonably efficiently even when filled up to + * about 90%. If bigger data items need to be stored and looked up, + * store the pointer to it as value in the hash table. + * + * @val_sz may be 0. This can be useful when all the stored + * information is contained in the key itself and the fact that it is + * in the hash table (or not). 
+ */ +#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \ + STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table + +#ifdef CONFIG_CHASH_STATS +#define __CHASH_STATS_INIT(prefix), \ + prefix.hits = 0, \ + prefix.hits_steps = 0, \ + prefix.hits_time_ns = 0, \ + prefix.miss = 0, \ + prefix.miss_steps = 0, \ + prefix.miss_time_ns = 0, \ + prefix.relocs = 0, \ + prefix.reloc_dist = 0 +#else +#define __CHASH_STATS_INIT(prefix) +#endif + +#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \ + prefix.bits = (bts), \ + prefix.key_size = (key_sz), \ + prefix.value_size = (val_sz), \ + prefix.size_mask = ((1 << bts) - 1), \ + prefix.occup_bitmap = &data[0], \ + prefix.valid_bitmap = &data \ + [__CHASH_BITMAP_SIZE(bts)], \ + prefix.keys64 = (u64 *)&data \ + [__CHASH_BITMAP_SIZE(bts) * 2], \ + prefix.values = (u8 *)&data \ + [__CHASH_BITMAP_SIZE(bts) * 2 + \ + __CHASH_ARRAY_SIZE(bts, key_sz)] \ + __CHASH_STATS_INIT(prefix) + +/** + * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table + * @tbl: name of the declared hash table + * @bts: Table size will be 2^bits entries + * @key_sz: Size of hash keys in bytes, 4 or 8 + * @val_sz: Size of data values in bytes, can be 0 + * + * Note: the macro can be used for global and local hash table variables. + */ +#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \ + DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \ + .table = { \ + __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \ + }, \ + .data = {0} \ + } + +/** + * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE + * @tbl: name of the declared hash table + * @bts: Table size will be 2^bits entries + * @key_sz: Size of hash keys in bytes, 4 or 8 + * @val_sz: Size of data values in bytes, can be 0 + */ +#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \ + __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz) + +int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size, + unsigned int value_size, gfp_t gfp_mask); +void chash_table_free(struct chash_table *table); + +/** + * chash_table_dump_stats - Dump statistics of a closed hash table + * @tbl: Pointer to the table structure + * + * Dumps some performance statistics of the table gathered in operation + * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled, + * user must turn on messages for chash.c (file chash.c +p). + */ +#ifdef CONFIG_CHASH_STATS +#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table) + +void __chash_table_dump_stats(struct __chash_table *table); +#else +#define chash_table_dump_stats(tbl) +#endif + +/** + * chash_table_reset_stats - Reset statistics of a closed hash table + * @tbl: Pointer to the table structure + */ +#ifdef CONFIG_CHASH_STATS +#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table) + +static inline void __chash_table_reset_stats(struct __chash_table *table) +{ + (void)table __CHASH_STATS_INIT((*table)); +} +#else +#define chash_table_reset_stats(tbl) +#endif + +/** + * chash_table_copy_in - Copy a new value into the hash table + * @tbl: Pointer to the table structure + * @key: Key of the entry to add or update + * @value: Pointer to value to copy, may be NULL + * + * If @key already has an entry, its value is replaced. Otherwise a + * new entry is added. If @value is NULL, the value is left unchanged + * or uninitialized. Returns 1 if an entry already existed, 0 if a new + * entry was added or %-ENOMEM if there was no free space in the + * table. 
+ */ +#define chash_table_copy_in(tbl, key, value) \ + __chash_table_copy_in(&(*tbl).table, key, value) + +int __chash_table_copy_in(struct __chash_table *table, u64 key, + const void *value); + +/** + * chash_table_copy_out - Copy a value out of the hash table + * @tbl: Pointer to the table structure + * @key: Key of the entry to find + * @value: Pointer to value to copy, may be NULL + * + * If @value is not NULL and the table has a non-0 value_size, the + * value at @key is copied to @value. Returns the slot index of the + * entry or %-EINVAL if @key was not found. + */ +#define chash_table_copy_out(tbl, key, value) \ + __chash_table_copy_out(&(*tbl).table, key, value, false) + +int __chash_table_copy_out(struct __chash_table *table, u64 key, + void *value, bool remove); + +/** + * chash_table_remove - Remove an entry from the hash table + * @tbl: Pointer to the table structure + * @key: Key of the entry to find + * @value: Pointer to value to copy, may be NULL + * + * If @value is not NULL and the table has a non-0 value_size, the + * value at @key is copied to @value. The entry is removed from the + * table. Returns the slot index of the removed entry or %-EINVAL if + * @key was not found. + */ +#define chash_table_remove(tbl, key, value) \ + __chash_table_copy_out(&(*tbl).table, key, value, true) + +/* + * Low level iterator API used internally by the above functions. + */ +struct chash_iter { + struct __chash_table *table; + unsigned long mask; + int slot; +}; + +/** + * CHASH_ITER_INIT - Initialize a hash table iterator + * @tbl: Pointer to hash table to iterate over + * @s: Initial slot number + */ +#define CHASH_ITER_INIT(table, s) { \ + table, \ + 1UL << ((s) & (BITS_PER_LONG - 1)), \ + s \ + } +/** + * CHASH_ITER_SET - Set hash table iterator to new slot + * @iter: Iterator + * @s: Slot number + */ +#define CHASH_ITER_SET(iter, s) \ + (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \ + (iter).slot = (s) +/** + * CHASH_ITER_INC - Increment hash table iterator + * @table: Hash table to iterate over + * + * Wraps around at the end. 
+ */ +#define CHASH_ITER_INC(iter) do { \ + (iter).mask = (iter).mask << 1 | \ + (iter).mask >> (BITS_PER_LONG - 1); \ + (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \ + } while (0) + +static inline bool chash_iter_is_valid(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] & + iter.mask); +} +static inline bool chash_iter_is_empty(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] & + iter.mask); +} + +static inline void chash_iter_set_valid(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask; + iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask; +} +static inline void chash_iter_set_invalid(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask; +} +static inline void chash_iter_set_empty(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask; +} + +static inline u32 chash_iter_key32(const struct chash_iter iter) +{ + BUG_ON(iter.table->key_size != 4); + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return iter.table->keys32[iter.slot]; +} +static inline u64 chash_iter_key64(const struct chash_iter iter) +{ + BUG_ON(iter.table->key_size != 8); + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return iter.table->keys64[iter.slot]; +} +static inline u64 chash_iter_key(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return (iter.table->key_size == 4) ? + iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot]; +} + +static inline u32 chash_iter_hash32(const struct chash_iter iter) +{ + BUG_ON(iter.table->key_size != 4); + return hash_32(chash_iter_key32(iter), iter.table->bits); +} + +static inline u32 chash_iter_hash64(const struct chash_iter iter) +{ + BUG_ON(iter.table->key_size != 8); + return hash_64(chash_iter_key64(iter), iter.table->bits); +} + +static inline u32 chash_iter_hash(const struct chash_iter iter) +{ + return (iter.table->key_size == 4) ? + hash_32(chash_iter_key32(iter), iter.table->bits) : + hash_64(chash_iter_key64(iter), iter.table->bits); +} + +static inline void *chash_iter_value(const struct chash_iter iter) +{ + BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits)); + return iter.table->values + + ((unsigned long)iter.slot * iter.table->value_size); +} + +#endif /* _LINUX_CHASH_H */ diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig new file mode 100644 index 0000000000000..03ee7ad21ac3f --- /dev/null +++ b/drivers/gpu/drm/amd/lib/Kconfig @@ -0,0 +1,27 @@ +menu "AMD Library routines" + +# +# Closed hash table +# +config CHASH + tristate "Closed hash table" + help + Statically sized closed hash table implementation with low + memory and CPU overhead. + +config CHASH_STATS + bool "Closed hash table performance statistics" + depends on CHASH + default n + help + Enable collection of performance statistics for closed hash tables. + +config CHASH_SELFTEST + bool "Closed hash table self test" + depends on CHASH + default n + help + Runs a selftest during module load. 
Several module parameters + are available to modify the behaviour of the test. + +endmenu diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile new file mode 100644 index 0000000000000..87cd7009e80f1 --- /dev/null +++ b/drivers/gpu/drm/amd/lib/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for AMD library routines, which are used by AMD driver +# components. +# +# This is for common library routines that can be shared between AMD +# driver components or later moved to kernel/lib for sharing with +# other drivers. + +ccflags-y := -I$(src)/../include + +obj-$(CONFIG_CHASH) += chash.o diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c new file mode 100644 index 0000000000000..e07e6f3030d61 --- /dev/null +++ b/drivers/gpu/drm/amd/lib/chash.c @@ -0,0 +1,638 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * chash_table_alloc - Allocate closed hash table + * @table: Pointer to the table structure + * @bits: Table size will be 2^bits entries + * @key_size: Size of hash keys in bytes, 4 or 8 + * @value_size: Size of data values in bytes, can be 0 + */ +int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size, + unsigned int value_size, gfp_t gfp_mask) +{ + if (bits > 31) + return -EINVAL; + + if (key_size != 4 && key_size != 8) + return -EINVAL; + + table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size), + sizeof(long), gfp_mask); + if (!table->data) + return -ENOMEM; + + __CHASH_TABLE_INIT(table->table, table->data, + bits, key_size, value_size); + + return 0; +} +EXPORT_SYMBOL(chash_table_alloc); + +/** + * chash_table_free - Free closed hash table + * @table: Pointer to the table structure + */ +void chash_table_free(struct chash_table *table) +{ + kfree(table->data); +} +EXPORT_SYMBOL(chash_table_free); + +#ifdef CONFIG_CHASH_STATS + +#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \ + u64 __nom = (nom); \ + u64 __denom = (denom); \ + u64 __quot, __frac; \ + u32 __rem; \ + \ + while (__denom >> 32) { \ + __nom >>= 1; \ + __denom >>= 1; \ + } \ + __quot = __nom; \ + __rem = do_div(__quot, __denom); \ + __frac = __rem * (frac_digits) + (__denom >> 1); \ + do_div(__frac, __denom); \ + (quot) = __quot; \ + (frac) = __frac; \ + } while (0) + +void __chash_table_dump_stats(struct __chash_table *table) +{ + struct chash_iter iter = CHASH_ITER_INIT(table, 0); + u32 filled = 0, empty = 0, tombstones = 0; + u64 quot1, quot2; + u32 frac1, frac2; + + do { + if (chash_iter_is_valid(iter)) + filled++; + else if (chash_iter_is_empty(iter)) + empty++; + else + tombstones++; + CHASH_ITER_INC(iter); + } while (iter.slot); + + pr_debug("chash: key size %u, value size %u\n", + table->key_size, table->value_size); + pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n", + 1 << table->bits, filled, empty, tombstones); + if (table->hits > 0) { + DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000); + DIV_FRAC(table->hits * 1000, table->hits_time_ns, + quot2, frac2, 1000); + } else { + quot1 = quot2 = 0; + frac1 = frac2 = 0; + } + pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n", + table->hits, quot1, frac1, quot2, frac2); + if (table->miss > 0) { + DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000); + DIV_FRAC(table->miss * 1000, table->miss_time_ns, + quot2, frac2, 1000); + } else { + quot1 = quot2 = 0; + frac1 = frac2 = 0; + } + pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n", + table->miss, quot1, frac1, quot2, frac2); + if (table->hits + table->miss > 0) { + DIV_FRAC(table->hits_steps + table->miss_steps, + table->hits + table->miss, quot1, frac1, 1000); + DIV_FRAC((table->hits + table->miss) * 1000, + (table->hits_time_ns + table->miss_time_ns), + quot2, frac2, 1000); + } else { + quot1 = quot2 = 0; + frac1 = frac2 = 0; + } + pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n", + table->hits + table->miss, quot1, frac1, quot2, frac2); + if (table->relocs > 0) { + DIV_FRAC(table->hits + table->miss, table->relocs, + quot1, frac1, 1000); + DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000); + pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n", + table->relocs, quot1, frac1, quot2, frac2); + } else { + pr_debug(" No relocations\n"); + } +} 
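+
+/*
+ * Illustrative usage sketch (hypothetical table name, size and values,
+ * shown for documentation only, not taken from the driver code): a
+ * statically defined table with 2^4 slots, 8-byte keys, 8-byte values.
+ *
+ *	DEFINE_CHASH_TABLE(tbl, 4, 8, 8);
+ *	u64 val = 42, out;
+ *
+ *	chash_table_copy_in(&tbl, 0x1234ULL, &val);   adds entry, returns 0
+ *	chash_table_copy_out(&tbl, 0x1234ULL, &out);  out == 42, returns slot
+ *	chash_table_remove(&tbl, 0x1234ULL, NULL);    drops entry, returns slot
+ *	chash_table_dump_stats(&tbl);                 no-op unless CONFIG_CHASH_STATS
+ */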
+EXPORT_SYMBOL(__chash_table_dump_stats);
+
+#undef DIV_FRAC
+#endif
+
+#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
+#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
+#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
+#define CHASH_IN_RANGE(table, slot, first, last) \
+	(CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
+
+/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
+#ifdef CHASH_DEBUG
+static void chash_table_dump(struct __chash_table *table)
+{
+	struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+
+	do {
+		if ((iter.slot & 3) == 0)
+			pr_debug("%04x: ", iter.slot);
+
+		if (chash_iter_is_valid(iter))
+			pr_debug("[%016llx] ", chash_iter_key(iter));
+		else if (chash_iter_is_empty(iter))
+			pr_debug("[ ] ");
+		else
+			pr_debug("[ ] ");
+
+		if ((iter.slot & 3) == 3)
+			pr_debug("\n");
+
+		CHASH_ITER_INC(iter);
+	} while (iter.slot);
+
+	if ((iter.slot & 3) != 0)
+		pr_debug("\n");
+}
+
+static int chash_table_check(struct __chash_table *table)
+{
+	u32 hash;
+	struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+	struct chash_iter cur = CHASH_ITER_INIT(table, 0);
+
+	do {
+		if (!chash_iter_is_valid(iter)) {
+			CHASH_ITER_INC(iter);
+			continue;
+		}
+
+		hash = chash_iter_hash(iter);
+		CHASH_ITER_SET(cur, hash);
+		while (cur.slot != iter.slot) {
+			if (chash_iter_is_empty(cur)) {
+				pr_err("Path to element at %x with hash %x broken at slot %x\n",
+				       iter.slot, hash, cur.slot);
+				chash_table_dump(table);
+				return -EINVAL;
+			}
+			CHASH_ITER_INC(cur);
+		}
+
+		CHASH_ITER_INC(iter);
+	} while (iter.slot);
+
+	return 0;
+}
+#endif
+
+static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
+{
+	BUG_ON(src.table == dst.table && src.slot == dst.slot);
+	BUG_ON(dst.table->key_size != src.table->key_size);
+	BUG_ON(dst.table->value_size != src.table->value_size);
+
+	if (dst.table->key_size == 4)
+		dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
+	else
+		dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
+
+	if (dst.table->value_size)
+		memcpy(chash_iter_value(dst), chash_iter_value(src),
+		       dst.table->value_size);
+
+	chash_iter_set_valid(dst);
+	chash_iter_set_invalid(src);
+
+#ifdef CONFIG_CHASH_STATS
+	if (src.table == dst.table) {
+		dst.table->relocs++;
+		dst.table->reloc_dist +=
+			CHASH_SUB(dst.table, src.slot, dst.slot);
+	}
+#endif
+}
+
+/**
+ * __chash_table_find - Helper for looking up a hash table entry
+ * @iter: Pointer to hash table iterator
+ * @key: Key of the entry to find
+ * @for_removal: set to true if the element will be removed soon
+ *
+ * Searches for an entry in the hash table with a given key. iter must
+ * be initialized by the caller to point to the home position of the
+ * hypothetical entry, i.e. it must be initialized with the hash table
+ * and the key's hash as the initial slot for the search.
+ *
+ * This function also does some local clean-up to speed up future
+ * look-ups by relocating entries to better slots and removing
+ * tombstones that are no longer needed.
+ *
+ * If @for_removal is true, the function avoids relocating the entry
+ * that is being returned.
+ *
+ * Returns 0 if the search is successful. In this case iter is updated
+ * to point to the found entry. Otherwise %-EINVAL is returned and the
+ * iter is updated to point to the first available slot for the given
+ * key. If the table is full, the slot is set to -1.
+ */ +static int chash_table_find(struct chash_iter *iter, u64 key, + bool for_removal) +{ +#ifdef CONFIG_CHASH_STATS + u64 ts1 = local_clock(); +#endif + u32 hash = iter->slot; + struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1); + int first_avail = (for_removal ? -2 : -1); + + while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) { + if (chash_iter_is_empty(*iter)) { + /* Found an empty slot, which ends the + * search. Clean up any preceding tombstones + * that are no longer needed because they lead + * to no-where + */ + if ((int)first_redundant.slot < 0) + goto not_found; + while (first_redundant.slot != iter->slot) { + if (!chash_iter_is_valid(first_redundant)) + chash_iter_set_empty(first_redundant); + CHASH_ITER_INC(first_redundant); + } +#ifdef CHASH_DEBUG + chash_table_check(iter->table); +#endif + goto not_found; + } else if (!chash_iter_is_valid(*iter)) { + /* Found a tombstone. Remember it as candidate + * for relocating the entry we're looking for + * or for adding a new entry with the given key + */ + if (first_avail == -1) + first_avail = iter->slot; + /* Or mark it as the start of a series of + * potentially redundant tombstones + */ + else if (first_redundant.slot == -1) + CHASH_ITER_SET(first_redundant, iter->slot); + } else if (first_redundant.slot >= 0) { + /* Found a valid, occupied slot with a + * preceding series of tombstones. Relocate it + * to a better position that no longer depends + * on those tombstones + */ + u32 cur_hash = chash_iter_hash(*iter); + + if (!CHASH_IN_RANGE(iter->table, cur_hash, + first_redundant.slot + 1, + iter->slot)) { + /* This entry has a hash at or before + * the first tombstone we found. We + * can relocate it to that tombstone + * and advance to the next tombstone + */ + chash_iter_relocate(first_redundant, *iter); + do { + CHASH_ITER_INC(first_redundant); + } while (chash_iter_is_valid(first_redundant)); + } else if (cur_hash != iter->slot) { + /* Relocate entry to its home position + * or as close as possible so it no + * longer depends on any preceding + * tombstones + */ + struct chash_iter new_iter = + CHASH_ITER_INIT(iter->table, cur_hash); + + while (new_iter.slot != iter->slot && + chash_iter_is_valid(new_iter)) + CHASH_ITER_INC(new_iter); + + if (new_iter.slot != iter->slot) + chash_iter_relocate(new_iter, *iter); + } + } + + CHASH_ITER_INC(*iter); + if (iter->slot == hash) { + iter->slot = -1; + goto not_found; + } + } + +#ifdef CONFIG_CHASH_STATS + iter->table->hits++; + iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1; +#endif + + if (first_avail >= 0) { + CHASH_ITER_SET(first_redundant, first_avail); + chash_iter_relocate(first_redundant, *iter); + iter->slot = first_redundant.slot; + iter->mask = first_redundant.mask; + } + +#ifdef CONFIG_CHASH_STATS + iter->table->hits_time_ns += local_clock() - ts1; +#endif + + return 0; + +not_found: +#ifdef CONFIG_CHASH_STATS + iter->table->miss++; + iter->table->miss_steps += (iter->slot < 0) ? + (1 << iter->table->bits) : + CHASH_SUB(iter->table, iter->slot, hash) + 1; +#endif + + if (first_avail >= 0) + CHASH_ITER_SET(*iter, first_avail); + +#ifdef CONFIG_CHASH_STATS + iter->table->miss_time_ns += local_clock() - ts1; +#endif + + return -EINVAL; +} + +int __chash_table_copy_in(struct __chash_table *table, u64 key, + const void *value) +{ + u32 hash = (table->key_size == 4) ? 
+ hash_32(key, table->bits) : hash_64(key, table->bits); + struct chash_iter iter = CHASH_ITER_INIT(table, hash); + int r = chash_table_find(&iter, key, false); + + /* Found an existing entry */ + if (!r) { + if (value && table->value_size) + memcpy(chash_iter_value(iter), value, + table->value_size); + return 1; + } + + /* Is there a place to add a new entry? */ + if (iter.slot < 0) { + pr_err("Hash table overflow\n"); + return -ENOMEM; + } + + chash_iter_set_valid(iter); + + if (table->key_size == 4) + table->keys32[iter.slot] = key; + else + table->keys64[iter.slot] = key; + if (value && table->value_size) + memcpy(chash_iter_value(iter), value, table->value_size); + + return 0; +} +EXPORT_SYMBOL(__chash_table_copy_in); + +int __chash_table_copy_out(struct __chash_table *table, u64 key, + void *value, bool remove) +{ + u32 hash = (table->key_size == 4) ? + hash_32(key, table->bits) : hash_64(key, table->bits); + struct chash_iter iter = CHASH_ITER_INIT(table, hash); + int r = chash_table_find(&iter, key, remove); + + if (r < 0) + return r; + + if (value && table->value_size) + memcpy(value, chash_iter_value(iter), table->value_size); + + if (remove) + chash_iter_set_invalid(iter); + + return iter.slot; +} +EXPORT_SYMBOL(__chash_table_copy_out); + +#ifdef CONFIG_CHASH_SELFTEST +/** + * chash_self_test - Run a self-test of the hash table implementation + * @bits: Table size will be 2^bits entries + * @key_size: Size of hash keys in bytes, 4 or 8 + * @min_fill: Minimum fill level during the test + * @max_fill: Maximum fill level during the test + * @iterations: Number of test iterations + * + * The test adds and removes entries from a hash table, cycling the + * fill level between min_fill and max_fill entries. Also tests lookup + * and value retrieval. + */ +static int __init chash_self_test(u8 bits, u8 key_size, + int min_fill, int max_fill, + u64 iterations) +{ + struct chash_table table; + int ret; + u64 add_count, rmv_count; + u64 value; + + if (key_size == 4 && iterations > 0xffffffff) + return -EINVAL; + if (min_fill >= max_fill) + return -EINVAL; + + ret = chash_table_alloc(&table, bits, key_size, sizeof(u64), + GFP_KERNEL); + if (ret) { + pr_err("chash_table_alloc failed: %d\n", ret); + return ret; + } + + for (add_count = 0, rmv_count = 0; add_count < iterations; + add_count++) { + /* When we hit the max_fill level, remove entries down + * to min_fill + */ + if (add_count - rmv_count == max_fill) { + u64 find_count = rmv_count; + + /* First try to find all entries that we're + * about to remove, confirm their value, test + * writing them back a second time. 
+ */ + for (; add_count - find_count > min_fill; + find_count++) { + ret = chash_table_copy_out(&table, find_count, + &value); + if (ret < 0) { + pr_err("chash_table_copy_out failed: %d\n", + ret); + goto out; + } + if (value != ~find_count) { + pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n", + find_count, ~find_count, value); +#ifdef CHASH_DEBUG + chash_table_dump(&table.table); +#endif + ret = -EFAULT; + goto out; + } + ret = chash_table_copy_in(&table, find_count, + &value); + if (ret != 1) { + pr_err("copy_in second time returned %d, expected 1\n", + ret); + ret = -EFAULT; + goto out; + } + } + /* Remove them until we hit min_fill level */ + for (; add_count - rmv_count > min_fill; rmv_count++) { + ret = chash_table_remove(&table, rmv_count, + NULL); + if (ret < 0) { + pr_err("chash_table_remove failed: %d\n", + ret); + goto out; + } + } + } + + /* Add a new value */ + value = ~add_count; + ret = chash_table_copy_in(&table, add_count, &value); + if (ret != 0) { + pr_err("copy_in first time returned %d, expected 0\n", + ret); + ret = -EFAULT; + goto out; + } + } + + chash_table_dump_stats(&table); + chash_table_reset_stats(&table); + +out: + chash_table_free(&table); + return ret; +} + +static unsigned int chash_test_bits = 10; +MODULE_PARM_DESC(test_bits, + "Selftest number of hash bits ([4..20], default=10)"); +module_param_named(test_bits, chash_test_bits, uint, 0444); + +static unsigned int chash_test_keysize = 8; +MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)"); +module_param_named(test_keysize, chash_test_keysize, uint, 0444); + +static unsigned int chash_test_minfill; +MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)"); +module_param_named(test_minfill, chash_test_minfill, uint, 0444); + +static unsigned int chash_test_maxfill; +MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)"); +module_param_named(test_maxfill, chash_test_maxfill, uint, 0444); + +static unsigned long chash_test_iters; +MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)"); +module_param_named(test_iters, chash_test_iters, ulong, 0444); + +static int __init chash_init(void) +{ + int ret; + u64 ts1_ns; + + /* Skip self test on user errors */ + if (chash_test_bits < 4 || chash_test_bits > 20) { + pr_err("chash: test_bits out of range [4..20].\n"); + return 0; + } + if (chash_test_keysize != 4 && chash_test_keysize != 8) { + pr_err("chash: test_keysize invalid. Must be 4 or 8.\n"); + return 0; + } + + if (!chash_test_minfill) + chash_test_minfill = (1 << chash_test_bits) / 2; + if (!chash_test_maxfill) + chash_test_maxfill = (1 << chash_test_bits) * 4 / 5; + if (!chash_test_iters) + chash_test_iters = (1 << chash_test_bits) * 1000; + + if (chash_test_minfill >= (1 << chash_test_bits)) { + pr_err("chash: test_minfill too big. Must be < table size.\n"); + return 0; + } + if (chash_test_maxfill >= (1 << chash_test_bits)) { + pr_err("chash: test_maxfill too big. 
Must be < table size.\n");
+		return 0;
+	}
+	if (chash_test_minfill >= chash_test_maxfill) {
+		pr_err("chash: test_minfill must be < test_maxfill.\n");
+		return 0;
+	}
+	if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
+		pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
+		return 0;
+	}
+
+	ts1_ns = local_clock();
+	ret = chash_self_test(chash_test_bits, chash_test_keysize,
+			      chash_test_minfill, chash_test_maxfill,
+			      chash_test_iters);
+	if (!ret) {
+		u64 ts_delta_us = local_clock() - ts1_ns;
+		u64 iters_per_second = (u64)chash_test_iters * 1000000;
+
+		do_div(ts_delta_us, 1000);
+		do_div(iters_per_second, ts_delta_us);
+		pr_info("chash: self test took %llu us, %llu iterations/s\n",
+			ts_delta_us, iters_per_second);
+	} else {
+		pr_err("chash: self test failed: %d\n", ret);
+	}
+
+	return ret;
+}
+
+module_init(chash_init);
+
+#endif /* CONFIG_CHASH_SELFTEST */
+
+MODULE_DESCRIPTION("Closed hash table");
+MODULE_LICENSE("GPL and additional rights");

From a2f14820e3493145c25095873d4a510a1b25efdc Mon Sep 17 00:00:00 2001
From: Felix Kuehling
Date: Sat, 26 Aug 2017 02:43:06 -0400
Subject: [PATCH 170/232] drm/amdgpu: Track pending retry faults in IH and VM (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

IH tracks pending retry faults in a hash table for fast lookup in
interrupt context. Each VM has a short FIFO of pending VM faults for
processing in a bottom half.

The IH prescreening stage adds retry faults and filters out repeated
retry interrupts to minimize the impact of interrupt storms.

It's the VM's responsibility to remove pending faults once they are
handled. For now this is only done when the VM is destroyed.

v2:
- Made the hash table smaller and the FIFO longer. I never want the
  FIFO to fill up, because that would make prescreen take longer. 128
  pending page faults should be enough to keep migrations busy.

Signed-off-by: Felix Kuehling
Acked-by: Christian König (v1)
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/Kconfig                |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 76 +++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 12 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |  7 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  7 +++
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 78 +++++++++++++++++++++++++-
 6 files changed, 180 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1989c276138cc..7fb8492d8e63c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -184,6 +184,7 @@ config DRM_AMDGPU
 	select BACKLIGHT_CLASS_DEVICE
 	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
+	select CHASH
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index c834a40cfad6c..f5f27e4f0f7ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -196,3 +196,79 @@ int amdgpu_ih_process(struct amdgpu_device *adev)
 
 	return IRQ_HANDLED;
 }
+
+/**
+ * amdgpu_ih_add_fault - Add a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ * + * If there are too many pending page faults, this will fail. Retry + * interrupts should be ignored in this case until there is enough + * free space. + * + * Returns 0 if the fault was added, 1 if the fault was already known, + * -ENOSPC if there are too many pending faults. + */ +int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key) +{ + unsigned long flags; + int r = -ENOSPC; + + if (WARN_ON_ONCE(!adev->irq.ih.faults)) + /* Should be allocated in _ih_sw_init on GPUs that + * support retry faults and require retry filtering. + */ + return r; + + spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); + + /* Only let the hash table fill up to 50% for best performance */ + if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1))) + goto unlock_out; + + r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL); + if (!r) + adev->irq.ih.faults->count++; + + /* chash_table_copy_in should never fail unless we're losing count */ + WARN_ON_ONCE(r < 0); + +unlock_out: + spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); + return r; +} + +/** + * amdgpu_ih_clear_fault - Remove a page fault record + * + * @adev: amdgpu device pointer + * @key: 64-bit encoding of PASID and address + * + * This should be called when a page fault has been handled. Any + * future interrupt with this key will be processed as a new + * page fault. + */ +void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key) +{ + unsigned long flags; + int r; + + if (!adev->irq.ih.faults) + return; + + spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); + + r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL); + if (!WARN_ON_ONCE(r < 0)) { + adev->irq.ih.faults->count--; + WARN_ON_ONCE(adev->irq.ih.faults->count < 0); + } + + spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 3de8e74e5b3a8..ada89358e2207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -24,6 +24,8 @@ #ifndef __AMDGPU_IH_H__ #define __AMDGPU_IH_H__ +#include + struct amdgpu_device; /* * vega10+ IH clients @@ -69,6 +71,13 @@ enum amdgpu_ih_clientid #define AMDGPU_IH_CLIENTID_LEGACY 0 +#define AMDGPU_PAGEFAULT_HASH_BITS 8 +struct amdgpu_retryfault_hashtable { + DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spinlock_t lock; + int count; +}; + /* * R6xx+ IH ring */ @@ -87,6 +96,7 @@ struct amdgpu_ih_ring { bool use_doorbell; bool use_bus_addr; dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ + struct amdgpu_retryfault_hashtable *faults; }; #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4 @@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev); int amdgpu_ih_process(struct amdgpu_device *adev); +int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key); +void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9b795915cab1d..6c1133298b174 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2680,6 +2680,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->pasid = pasid; } + INIT_KFIFO(vm->faults); + return 0; error_free_root: @@ -2731,8 +2733,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct 
amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; + u64 fault; int i; + /* Clear pending page faults from IH when the VM is destroyed */ + while (kfifo_get(&vm->faults, &fault)) + amdgpu_ih_clear_fault(adev, fault); + if (vm->pasid) { unsigned long flags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 7873dfa8c0f95..447ed6e7e5862 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -120,6 +120,10 @@ struct amdgpu_vm_pt { unsigned last_entry_used; }; +#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr)) +#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48) +#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL) + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root va; @@ -160,6 +164,9 @@ struct amdgpu_vm { /* Flag to indicate ATS support from PTE for GFX9 */ bool pte_support_ats; + + /* Up to 128 pending page faults */ + DECLARE_KFIFO(faults, u64, 128); }; struct amdgpu_vm_id { diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index eda4771e273f5..dd6af2176d3e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -235,8 +235,73 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) */ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev) { - /* TODO: Filter known pending page faults */ + u32 ring_index = adev->irq.ih.rptr >> 2; + u32 dw0, dw3, dw4, dw5; + u16 pasid; + u64 addr, key; + struct amdgpu_vm *vm; + int r; + + dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); + dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); + + /* Filter retry page faults, let only the first one pass. If + * there are too many outstanding faults, ignore them until + * some faults get cleared. + */ + switch (dw0 & 0xff) { + case AMDGPU_IH_CLIENTID_VMC: + case AMDGPU_IH_CLIENTID_UTCL2: + break; + default: + /* Not a VM fault */ + return true; + } + + /* Not a retry fault */ + if (!(dw5 & 0x80)) + return true; + + pasid = dw3 & 0xffff; + /* No PASID, can't identify faulting process */ + if (!pasid) + return true; + + addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); + key = AMDGPU_VM_FAULT(pasid, addr); + r = amdgpu_ih_add_fault(adev, key); + + /* Hash table is full or the fault is already being processed, + * ignore further page faults + */ + if (r != 0) + goto ignore_iv; + + /* Track retry faults in per-VM fault FIFO. */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + spin_unlock(&adev->vm_manager.pasid_lock); + if (WARN_ON_ONCE(!vm)) { + /* VM not found, process it normally */ + amdgpu_ih_clear_fault(adev, key); + return true; + } + /* No locking required with single writer and single reader */ + r = kfifo_put(&vm->faults, key); + if (!r) { + /* FIFO is full. 
Ignore it until there is space */ + amdgpu_ih_clear_fault(adev, key); + goto ignore_iv; + } + + /* It's the first fault for this address, process it normally */ return true; + +ignore_iv: + adev->irq.ih.rptr += 32; + return false; } /** @@ -323,6 +388,14 @@ static int vega10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1; + adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL); + if (!adev->irq.ih.faults) + return -ENOMEM; + INIT_CHASH_TABLE(adev->irq.ih.faults->hash, + AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spin_lock_init(&adev->irq.ih.faults->lock); + adev->irq.ih.faults->count = 0; + r = amdgpu_irq_init(adev); return r; @@ -335,6 +408,9 @@ static int vega10_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); + kfree(adev->irq.ih.faults); + adev->irq.ih.faults = NULL; + return 0; } From 19dde58929c04575aff0c61d99efaf023f6810b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 3 Jul 2017 19:49:55 +0200 Subject: [PATCH 171/232] drm/ttm: cleanup ttm_page_alloc_dma.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused defines and variables. Also stop computing the gfp_flags when they aren't used. No intended functional change. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 42 +++++++++--------------- 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 90ddbdca93bdb..06bc14b55e661 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -60,22 +60,15 @@ #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) #define SMALL_ALLOCATION 4 #define FREE_ALL_PAGES (~0U) -/* times are in msecs */ -#define IS_UNDEFINED (0) -#define IS_WC (1<<1) -#define IS_UC (1<<2) -#define IS_CACHED (1<<3) -#define IS_DMA32 (1<<4) enum pool_type { - POOL_IS_UNDEFINED, - POOL_IS_WC = IS_WC, - POOL_IS_UC = IS_UC, - POOL_IS_CACHED = IS_CACHED, - POOL_IS_WC_DMA32 = IS_WC | IS_DMA32, - POOL_IS_UC_DMA32 = IS_UC | IS_DMA32, - POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32, + IS_UNDEFINED = 0, + IS_WC = 1 << 1, + IS_UC = 1 << 2, + IS_CACHED = 1 << 3, + IS_DMA32 = 1 << 4 }; + /* * The pool structure. There are usually six pools: * - generic (not restricted to DMA32): @@ -86,11 +79,9 @@ enum pool_type { * The other ones can be shrunk by the shrinker API if neccessary. * @pools: The 'struct device->dma_pools' link. * @type: Type of the pool - * @lock: Protects the inuse_list and free_list from concurrnet access. Must be + * @lock: Protects the free_list from concurrnet access. Must be * used with irqsave/irqrestore variants because pool allocator maybe called * from delayed work. - * @inuse_list: Pool of pages that are in use. The order is very important and - * it is in the order that the TTM pages that are put back are in. * @free_list: Pool of pages that are free to be used. No order requirements. * @dev: The device that is associated with these pools. * @size: Size used during DMA allocation. 
@@ -107,7 +98,6 @@ struct dma_pool { struct list_head pools; /* The 'struct device->dma_pools link */ enum pool_type type; spinlock_t lock; - struct list_head inuse_list; struct list_head free_list; struct device *dev; unsigned size; @@ -609,7 +599,6 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags, sec_pool->pool = pool; INIT_LIST_HEAD(&pool->free_list); - INIT_LIST_HEAD(&pool->inuse_list); INIT_LIST_HEAD(&pool->pools); spin_lock_init(&pool->lock); pool->dev = dev; @@ -882,22 +871,23 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev) struct dma_pool *pool; enum pool_type type; unsigned i; - gfp_t gfp_flags; int ret; if (ttm->state != tt_unpopulated) return 0; type = ttm_to_type(ttm->page_flags, ttm->caching_state); - if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) - gfp_flags = GFP_USER | GFP_DMA32; - else - gfp_flags = GFP_HIGHUSER; - if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - pool = ttm_dma_find_pool(dev, type); if (!pool) { + gfp_t gfp_flags; + + if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) + gfp_flags = GFP_USER | GFP_DMA32; + else + gfp_flags = GFP_HIGHUSER; + if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) + gfp_flags |= __GFP_ZERO; + pool = ttm_dma_pool_init(dev, gfp_flags, type); if (IS_ERR_OR_NULL(pool)) { return -ENOMEM; From e33dac39bcf9978061df95f1510b2ec6bfab07fc Mon Sep 17 00:00:00 2001 From: "Xiangliang.Yu" Date: Tue, 12 Sep 2017 17:31:46 +0800 Subject: [PATCH 172/232] drm/amdgpu/sdma3: Enable sdma wptr polling for SRIOV When hypervisor triggering FLR for one of VFs, need to enable sdma wptr polling to avoid missing wptr update if enabling doorbell. Signed-off-by: Xiangliang.Yu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b1de44f228249..b7d69ab9a94a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -641,10 +641,11 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; u32 wb_offset; u32 doorbell; + u64 wptr_gpu_addr; int i, j, r; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -707,6 +708,20 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) } WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell); + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i], + lower_32_bits(wptr_gpu_addr)); + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i], + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]); + if (amdgpu_sriov_vf(adev)) + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); + else + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl); + /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); From 3e4b0bd96092bbf9c5a538916a61faa5eca8e9cb Mon Sep 17 00:00:00 2001 From: "Xiangliang.Yu" Date: Wed, 13 Sep 2017 10:58:19 +0800 
Subject: [PATCH 173/232] drm/amdgpu/sdma3: set wptr shadow atomically Port it from sdma4 for wptr polling usage. Signed-off-by: Xiangliang.Yu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b7d69ab9a94a3..728c0d8e849b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -379,8 +379,10 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; if (ring->use_doorbell) { + u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs]; + /* XXX check if swapping is necessary on BE */ - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2; + WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2)); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2); } else { int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; From f6e8b15af7c96a429b320eb8414791666c0bd2b7 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 3 Mar 2017 11:54:37 -0500 Subject: [PATCH 174/232] drm/amdgpu: remove the clearance of vce 4.0 interrupt mask Requested by SRIOV, the clearance of the bit moved into firmware Signed-off-by: Leo Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 11134d5f74433..75745544600af 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -1011,10 +1011,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev, { DRM_DEBUG("IH: VCE\n"); - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS), - VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, - ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); - switch (entry->src_data[0]) { case 0: case 1: From 0b693f0b5612594d089e669804b1b6a62639779f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 19 Sep 2017 14:36:08 +0800 Subject: [PATCH 175/232] drm/amdgpu: fix checkpatch.pl warning to amdgpu_drv.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix checkpatch.pl WARNING: Prefer 'unsigned int' to bare use of 'unsigned' Reviewed-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d62a35ef26be0..ac026f5f56fb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -92,7 +92,7 @@ extern int amdgpu_dpm; extern int amdgpu_fw_load_type; extern int amdgpu_aspm; extern int amdgpu_runtime_pm; -extern unsigned amdgpu_ip_block_mask; +extern uint amdgpu_ip_block_mask; extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; @@ -105,14 +105,14 @@ extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_no_evict; extern int amdgpu_direct_gma_size; -extern unsigned amdgpu_pcie_gen_cap; -extern unsigned amdgpu_pcie_lane_cap; -extern unsigned amdgpu_cg_mask; -extern unsigned amdgpu_pg_mask; -extern unsigned amdgpu_sdma_phase_quantum; +extern uint amdgpu_pcie_gen_cap; +extern uint amdgpu_pcie_lane_cap; +extern uint 
amdgpu_cg_mask; +extern uint amdgpu_pg_mask; +extern uint amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; -extern unsigned amdgpu_pp_feature_mask; +extern uint amdgpu_pp_feature_mask; extern int amdgpu_vram_page_split; extern int amdgpu_ngg; extern int amdgpu_prim_buf_per_se; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 792b11795a816..dee35a9c57235 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -92,7 +92,7 @@ int amdgpu_dpm = -1; int amdgpu_fw_load_type = -1; int amdgpu_aspm = -1; int amdgpu_runtime_pm = -1; -unsigned amdgpu_ip_block_mask = 0xffffffff; +uint amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; int amdgpu_vm_size = -1; @@ -107,14 +107,14 @@ int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_no_evict = 0; int amdgpu_direct_gma_size = 0; -unsigned amdgpu_pcie_gen_cap = 0; -unsigned amdgpu_pcie_lane_cap = 0; -unsigned amdgpu_cg_mask = 0xffffffff; -unsigned amdgpu_pg_mask = 0xffffffff; -unsigned amdgpu_sdma_phase_quantum = 32; +uint amdgpu_pcie_gen_cap = 0; +uint amdgpu_pcie_lane_cap = 0; +uint amdgpu_cg_mask = 0xffffffff; +uint amdgpu_pg_mask = 0xffffffff; +uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -unsigned amdgpu_pp_feature_mask = 0xffffffff; +uint amdgpu_pp_feature_mask = 0xffffffff; int amdgpu_ngg = 0; int amdgpu_prim_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0; From a40cfa0bef9366ad156717078dae681305099a15 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 18 Sep 2017 07:14:56 -0400 Subject: [PATCH 176/232] drm/amd/amdgpu: Fold TTM debugfs entries into array (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher (v2): add domains and avoid strcmp --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 54 ++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8ee16dfdb8af3..50d20903de4f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1809,6 +1809,19 @@ static const struct file_operations amdgpu_ttm_gtt_fops = { #endif + + +static const struct { + char *name; + const struct file_operations *fops; + int domain; +} ttm_debugfs_entries[] = { + { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, +#endif +}; + #endif static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) @@ -1819,22 +1832,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) struct drm_minor *minor = adev->ddev->primary; struct dentry *ent, *root = minor->debugfs_root; - ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root, - adev, &amdgpu_ttm_vram_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->mc.mc_vram_size); - adev->mman.vram = ent; - -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root, - adev, &amdgpu_ttm_gtt_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->mc.gart_size); - adev->mman.gtt = ent; + for (count = 0; count < 
ARRAY_SIZE(ttm_debugfs_entries); count++) { + ent = debugfs_create_file( + ttm_debugfs_entries[count].name, + S_IFREG | S_IRUGO, root, + adev, + ttm_debugfs_entries[count].fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) + i_size_write(ent->d_inode, adev->mc.mc_vram_size); + else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) + i_size_write(ent->d_inode, adev->mc.gart_size); + adev->mman.debugfs_entries[count] = ent; + } -#endif count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); #ifdef CONFIG_SWIOTLB @@ -1844,7 +1856,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); #else - return 0; #endif } @@ -1852,14 +1863,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) + unsigned i; - debugfs_remove(adev->mman.vram); - adev->mman.vram = NULL; - -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - debugfs_remove(adev->mman.gtt); - adev->mman.gtt = NULL; -#endif - + for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) + debugfs_remove(adev->mman.debugfs_entries[i]); #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 64709e041d5b2..7abae6867339c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -24,6 +24,7 @@ #ifndef __AMDGPU_TTM_H__ #define __AMDGPU_TTM_H__ +#include "amdgpu.h" #include "gpu_scheduler.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) @@ -45,8 +46,7 @@ struct amdgpu_mman { bool initialized; #if defined(CONFIG_DEBUG_FS) - struct dentry *vram; - struct dentry *gtt; + struct dentry *debugfs_entries[8]; #endif /* buffer handling */ From 38290b2c456ade70db7cedf489332af5e4263fb0 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 18 Sep 2017 07:28:14 -0400 Subject: [PATCH 177/232] drm/amd/amdgpu: add support for iova_to_phys to replace TTM trace (v5) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher (v2): Add domain to iova debugfs (v3): Add true read/write methods to access system memory of pages mapped to the device (v4): Move get_domain call out of loop and return on error (v5): Just use kmap/kunmap --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 50d20903de4f4..c7f8e081a7724 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_trace.h" #include "bif/bif_4_1_d.h" @@ -1809,7 +1810,104 @@ static const struct file_operations amdgpu_ttm_gtt_fops = { #endif +static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result, n; + int r; + uint64_t phys; + void *ptr; + struct iommu_domain *dom; + + dom = iommu_get_domain_for_dev(adev->dev); + if (!dom) + return -EFAULT; + + result = 0; + while (size) { + // get physical address and map + phys = iommu_iova_to_phys(dom, *pos); + + // copy upto one page + if (size > PAGE_SIZE) + n = PAGE_SIZE; + else + n = size; + + // to end of the page + if (((*pos & (PAGE_SIZE - 1)) + n) >= 
PAGE_SIZE) + n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1)); + + ptr = kmap(pfn_to_page(PFN_DOWN(phys))); + if (!ptr) + return -EFAULT; + + r = copy_to_user(buf, ptr, n); + kunmap(pfn_to_page(PFN_DOWN(phys))); + if (r) + return -EFAULT; + + *pos += n; + size -= n; + result += n; + } + + return result; +} + +static ssize_t amdgpu_iova_to_phys_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result, n; + int r; + uint64_t phys; + void *ptr; + struct iommu_domain *dom; + + dom = iommu_get_domain_for_dev(adev->dev); + if (!dom) + return -EFAULT; + + result = 0; + while (size) { + // get physical address and map + phys = iommu_iova_to_phys(dom, *pos); + // copy upto one page + if (size > PAGE_SIZE) + n = PAGE_SIZE; + else + n = size; + + // to end of the page + if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE) + n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1)); + + ptr = kmap(pfn_to_page(PFN_DOWN(phys))); + if (!ptr) + return -EFAULT; + + r = copy_from_user(ptr, buf, n); + kunmap(pfn_to_page(PFN_DOWN(phys))); + if (r) + return -EFAULT; + + *pos += n; + size -= n; + result += n; + } + + return result; +} + +static const struct file_operations amdgpu_ttm_iova_fops = { + .owner = THIS_MODULE, + .read = amdgpu_iova_to_phys_read, + .write = amdgpu_iova_to_phys_write, + .llseek = default_llseek +}; static const struct { char *name; @@ -1820,6 +1918,7 @@ static const struct { #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, #endif + { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM }, }; #endif From 79ba2800662bcfaef41269dd9389d0e9ed729a28 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 18 Sep 2017 08:10:00 -0400 Subject: [PATCH 178/232] drm/amd/amdgpu: remove usage of ttm trace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 36 +++---------------------- 1 file changed, 3 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c7f8e081a7724..0e5f78f3a97e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -704,22 +703,6 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) } } -static void amdgpu_trace_dma_map(struct ttm_tt *ttm) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; - - ttm_trace_dma_map(adev->dev, >t->ttm); -} - -static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; - - ttm_trace_dma_unmap(adev->dev, >t->ttm); -} - /* prepare the sg table with the user pages */ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) { @@ -746,8 +729,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - amdgpu_trace_dma_map(ttm); - return 0; release_sg: @@ -773,8 +754,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) amdgpu_ttm_tt_mark_user_pages(ttm); - amdgpu_trace_dma_unmap(ttm); - sg_free_table(ttm->sg); } @@ -958,7 +937,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) { struct amdgpu_device *adev = 
amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (ttm->state != tt_unpopulated) @@ -978,22 +956,16 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); ttm->state = tt_unbound; - r = 0; - goto trace_mappings; + return 0; } #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { - r = ttm_dma_populate(>t->ttm, adev->dev); - goto trace_mappings; + return ttm_dma_populate(>t->ttm, adev->dev); } #endif - r = ttm_populate_and_map_pages(adev->dev, >t->ttm); -trace_mappings: - if (likely(!r)) - amdgpu_trace_dma_map(ttm); - return r; + return ttm_populate_and_map_pages(adev->dev, >t->ttm); } static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) @@ -1014,8 +986,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) adev = amdgpu_ttm_adev(ttm->bdev); - amdgpu_trace_dma_unmap(ttm); - #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { ttm_dma_unpopulate(>t->ttm, adev->dev); From 4135d9f82cd4c8d956ccc85063e24c468dc6fa63 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 18 Sep 2017 08:13:28 -0400 Subject: [PATCH 179/232] drm/ttm: Remove TTM dma tracepoint since it's not required anymore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/Makefile | 2 +- drivers/gpu/drm/ttm/ttm_debug.c | 74 ----------------------- drivers/gpu/drm/ttm/ttm_trace.h | 87 --------------------------- drivers/gpu/drm/ttm/ttm_tracepoints.c | 45 -------------- 4 files changed, 1 insertion(+), 207 deletions(-) delete mode 100644 drivers/gpu/drm/ttm/ttm_debug.c delete mode 100644 drivers/gpu/drm/ttm/ttm_trace.h delete mode 100644 drivers/gpu/drm/ttm/ttm_tracepoints.c diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index ab2bef1219e55..4d0c938ff4b21 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,7 +4,7 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ - ttm_bo_manager.o ttm_page_alloc_dma.o ttm_debug.o ttm_tracepoints.o + ttm_bo_manager.o ttm_page_alloc_dma.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_debug.c b/drivers/gpu/drm/ttm/ttm_debug.c deleted file mode 100644 index ef5f0d0901545..0000000000000 --- a/drivers/gpu/drm/ttm/ttm_debug.c +++ /dev/null @@ -1,74 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2017 Advanced Micro Devices, Inc. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Tom St Denis - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "ttm_trace.h" - -void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt) -{ - unsigned i; - - if (unlikely(trace_ttm_dma_map_enabled())) { - for (i = 0; i < tt->ttm.num_pages; i++) { - trace_ttm_dma_map( - dev, - tt->ttm.pages[i], - tt->dma_address[i]); - } - } -} -EXPORT_SYMBOL(ttm_trace_dma_map); - -void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt) -{ - unsigned i; - - if (unlikely(trace_ttm_dma_unmap_enabled())) { - for (i = 0; i < tt->ttm.num_pages; i++) { - trace_ttm_dma_unmap( - dev, - tt->ttm.pages[i], - tt->dma_address[i]); - } - } -} -EXPORT_SYMBOL(ttm_trace_dma_unmap); - diff --git a/drivers/gpu/drm/ttm/ttm_trace.h b/drivers/gpu/drm/ttm/ttm_trace.h deleted file mode 100644 index 715ce68b7b333..0000000000000 --- a/drivers/gpu/drm/ttm/ttm_trace.h +++ /dev/null @@ -1,87 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2017 Advanced Micro Devices, Inc. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ -/* - * Authors: Tom St Denis - */ -#if !defined(_TTM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TTM_TRACE_H_ - -#include -#include -#include - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM ttm -#define TRACE_INCLUDE_FILE ttm_trace - -TRACE_EVENT(ttm_dma_map, - TP_PROTO(struct device *dev, struct page *page, dma_addr_t dma_address), - TP_ARGS(dev, page, dma_address), - TP_STRUCT__entry( - __string(device, dev_name(dev)) - __field(dma_addr_t, dma) - __field(phys_addr_t, phys) - ), - TP_fast_assign( - __assign_str(device, dev_name(dev)); - __entry->dma = dma_address; - __entry->phys = page_to_phys(page); - ), - TP_printk("%s: %pad => %pa", - __get_str(device), - &__entry->dma, - &__entry->phys) -); - -TRACE_EVENT(ttm_dma_unmap, - TP_PROTO(struct device *dev, struct page *page, dma_addr_t dma_address), - TP_ARGS(dev, page, dma_address), - TP_STRUCT__entry( - __string(device, dev_name(dev)) - __field(dma_addr_t, dma) - __field(phys_addr_t, phys) - ), - TP_fast_assign( - __assign_str(device, dev_name(dev)); - __entry->dma = dma_address; - __entry->phys = page_to_phys(page); - ), - TP_printk("%s: %pad => %pa", - __get_str(device), - &__entry->dma, - &__entry->phys) -); - -#endif - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/ttm/ -#include - diff --git a/drivers/gpu/drm/ttm/ttm_tracepoints.c b/drivers/gpu/drm/ttm/ttm_tracepoints.c deleted file mode 100644 index 861a6266822be..0000000000000 --- a/drivers/gpu/drm/ttm/ttm_tracepoints.c +++ /dev/null @@ -1,45 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2017 Advanced Micro Devices, Inc. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ -/* - * Authors: Tom St Denis - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include "ttm_trace.h" From 98512bb8c241f67f3a5cf0a121624b28a852db45 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Thu, 14 Sep 2017 16:25:19 +0800 Subject: [PATCH 180/232] drm/amdgpu: Add GPU reset functionality for Vega10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V2 Signed-off-by: Ken Wang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 21 ++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3 ++ drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 7 +++++ drivers/gpu/drm/amd/amdgpu/psp_v10_0.h | 2 ++ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 34 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v3_1.h | 1 + drivers/gpu/drm/amd/amdgpu/soc15.c | 27 ++++++++--------- 8 files changed, 83 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b0109ebe0a1ba..36979e1d7b5f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2581,7 +2581,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || - (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { if (adev->ip_blocks[i].status.hang) { DRM_INFO("Some block need full reset!\n"); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 84bd6ed7a641c..a5b595a75d910 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -62,6 +62,7 @@ static int psp_sw_init(void *handle) psp->cmd_submit = psp_v3_1_cmd_submit; psp->compare_sram_data = psp_v3_1_compare_sram_data; psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; + psp->mode1_reset = psp_v3_1_mode1_reset; break; case CHIP_RAVEN: psp->init_microcode = psp_v10_0_init_microcode; @@ -72,6 +73,7 @@ static int psp_sw_init(void *handle) psp->ring_destroy = psp_v10_0_ring_destroy; psp->cmd_submit = psp_v10_0_cmd_submit; psp->compare_sram_data = psp_v10_0_compare_sram_data; + psp->mode1_reset = psp_v10_0_mode1_reset; break; default: return -EINVAL; @@ -497,6 +499,22 @@ static int psp_resume(void *handle) return ret; } +static bool psp_check_reset(void* handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->flags & AMD_IS_APU) + return true; + + return false; +} + +static int psp_reset(void* handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return psp_mode1_reset(&adev->psp); +} + static bool psp_check_fw_loading_status(struct amdgpu_device *adev, enum AMDGPU_UCODE_ID ucode_type) { @@ -540,8 +558,9 @@ const struct amd_ip_funcs psp_ip_funcs = { .suspend = psp_suspend, .resume = psp_resume, .is_idle = NULL, + .check_soft_reset = psp_check_reset, .wait_for_idle = NULL, - .soft_reset = NULL, + .soft_reset = psp_reset, .set_clockgating_state = 
psp_set_clockgating_state, .set_powergating_state = psp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 1b7d12d88720f..ce46545504165 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -76,6 +76,7 @@ struct psp_context struct amdgpu_firmware_info *ucode, enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); + int (*mode1_reset)(struct psp_context *psp); /* fence buffer */ struct amdgpu_bo *fw_pri_bo; @@ -139,6 +140,8 @@ struct amdgpu_psp_funcs { ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) +#define psp_mode1_reset(psp) \ + ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false) extern const struct amd_ip_funcs psp_ip_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 77cab1ff0254f..dea7c909ca5fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -407,3 +407,10 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp, return true; } + + +int psp_v10_0_mode1_reset(struct psp_context *psp) +{ + DRM_INFO("psp mode 1 reset not supported now! \n"); + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h index 3af3ad1320ff8..451e8308303f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h @@ -45,4 +45,6 @@ extern int psp_v10_0_cmd_submit(struct psp_context *psp, extern bool psp_v10_0_compare_sram_data(struct psp_context *psp, struct amdgpu_firmware_info *ucode, enum AMDGPU_UCODE_ID ucode_type); + +extern int psp_v10_0_mode1_reset(struct psp_context *psp); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index bcbe30dfff39f..cee5c396b2774 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -530,3 +530,37 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2); return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? 
true : false; } + +int psp_v3_1_mode1_reset(struct psp_context *psp) +{ + int ret; + uint32_t offset; + struct amdgpu_device *adev = psp->adev; + + offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); + + ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + + if (ret) { + DRM_INFO("psp is not working correctly before mode1 reset!\n"); + return -EINVAL; + } + + /*send the mode 1 reset command*/ + WREG32(offset, 0x70000); + + mdelay(1000); + + offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); + + ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + + if (ret) { + DRM_INFO("psp mode 1 reset failed!\n"); + return -EINVAL; + } + + DRM_INFO("psp mode1 reset succeed \n"); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h index 5af2231b7099d..b05dbada77517 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h @@ -53,4 +53,5 @@ extern bool psp_v3_1_compare_sram_data(struct psp_context *psp, struct amdgpu_firmware_info *ucode, enum AMDGPU_UCODE_ID ucode_type); extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp); +extern int psp_v3_1_mode1_reset(struct psp_context *psp); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f2c3a49f73a00..245a18aeb3899 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -407,18 +407,27 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) +static int soc15_asic_reset(struct amdgpu_device *adev) { u32 i; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + dev_info(adev->dev, "GPU reset\n"); /* disable BM */ pci_clear_master(adev->pdev); - /* reset */ - amdgpu_pci_config_reset(adev); - udelay(100); + pci_save_state(adev->pdev); + + for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){ + adev->ip_blocks[i].version->funcs->soft_reset((void *)adev); + break; + } + } + + pci_restore_state(adev->pdev); /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { @@ -430,14 +439,6 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) udelay(1); } -} - -static int soc15_asic_reset(struct amdgpu_device *adev) -{ - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - soc15_gpu_pci_config_reset(adev); - amdgpu_atombios_scratch_regs_engine_hung(adev, false); return 0; From ab5d6227b78be29acf0340da2673f662c0df9b2d Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 12 Sep 2017 14:33:29 +0800 Subject: [PATCH 181/232] drm/amdgpu/sriov:fix missing error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 79d9ab43d42c0..4fd06f8d9768b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -841,8 +841,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (amdgpu_sriov_vf(adev)) { r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); - if (r) + if (r) { + amdgpu_vm_fini(adev, &fpriv->vm); + 
kfree(fpriv); goto out_suspend; + } } mutex_init(&fpriv->bo_list_lock); From 7c3f2167b4b66a0994a643c8a3bcf01ec3433b8b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 14 Sep 2017 19:45:33 +0800 Subject: [PATCH 182/232] drm/amdgpu:no kiq in IH MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index dd6af2176d3e0..a3b30d84dbb3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -219,9 +219,9 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) wptr, adev->irq.ih.rptr, tmp); adev->irq.ih.rptr = tmp; - tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); + tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); + WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); } return (wptr & adev->irq.ih.ptr_mask); } From 3224a12b90f6bee789d2051b18a8249a82ad92d4 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 18:57:12 +0800 Subject: [PATCH 183/232] drm/amdgpu/sriov:move in_reset to adev and rename MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit currently in_reset is only used in sriov gpu reset, and it will be used for other non-gfx hw component later, like PSP, so move it from gfx to adev and rename to in_sriov_reset make more sense. Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ac026f5f56fb2..33e1d619d060a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1018,7 +1018,6 @@ struct amdgpu_gfx { /* reset mask */ uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; - bool in_reset; /* s3/s4 mask */ bool in_suspend; /* NGG */ @@ -1583,6 +1582,7 @@ struct amdgpu_device { /* record last mm index being written through WREG32*/ unsigned long last_mm_index; + bool in_sriov_reset; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 36979e1d7b5f0..506a26793ce6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2690,7 +2690,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) mutex_lock(&adev->virt.lock_reset); atomic_inc(&adev->gpu_reset_counter); - adev->gfx.in_reset = true; + adev->in_sriov_reset = true; /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); @@ -2801,7 +2801,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) dev_info(adev->dev, "GPU reset successed!\n"); } - adev->gfx.in_reset = false; + adev->in_sriov_reset = false; mutex_unlock(&adev->virt.lock_reset); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 666a1545e949a..fe603cc2c4a9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4811,7 +4811,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) gfx_v8_0_kiq_setting(ring); - if (adev->gfx.in_reset) { /* for GPU_RESET case */ + if (adev->in_sriov_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); @@ -4848,7 +4848,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) struct vi_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { + if (!adev->in_sriov_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -4860,7 +4860,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); - } else if (adev->gfx.in_reset) { /* for GPU_RESET case */ + } else if (adev->in_sriov_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 72dbf890c65b6..ecb9674bc8031 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2698,7 +2698,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) gfx_v9_0_kiq_setting(ring); - if (adev->gfx.in_reset) { /* for GPU_RESET case */ + if (adev->in_sriov_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); @@ -2736,7 +2736,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) struct v9_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { + if (!adev->in_sriov_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -2748,7 +2748,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); - } else if (adev->gfx.in_reset) { /* for GPU_RESET case */ + } else if (adev->in_sriov_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); From 55981bd2e88a0b6e572a3997098886e2f9b2121e Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 18:42:12 +0800 Subject: [PATCH 184/232] drm/amdgpu/sriov:don't load psp fw during gpu reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At least for SRIOV we found reload PSP fw during gpu reset cause PSP hang. 
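[Reading note for the patch below] The guard this change adds to psp_hw_start() is easiest to read as "skip the PSP bootloader loads only when this is an SR-IOV VF that is in the middle of a host-triggered reset". A minimal sketch of that condition, using only symbols that appear in this series; the helper name is hypothetical and not part of the patch:

	/* Illustration only: by De Morgan's law,
	 *   !amdgpu_sriov_vf(adev) || !adev->in_sriov_reset
	 * is equivalent to
	 *   !(amdgpu_sriov_vf(adev) && adev->in_sriov_reset),
	 * so bare-metal init and first-time SR-IOV init still load sysdrv/sos.
	 */
	static bool psp_skip_bootloader_load(struct amdgpu_device *adev)
	{
		return amdgpu_sriov_vf(adev) && adev->in_sriov_reset;
	}
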
Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a5b595a75d910..e02828665b14a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -255,15 +255,18 @@ static int psp_asd_load(struct psp_context *psp) static int psp_hw_start(struct psp_context *psp) { + struct amdgpu_device *adev = psp->adev; int ret; - ret = psp_bootloader_load_sysdrv(psp); - if (ret) - return ret; + if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) { + ret = psp_bootloader_load_sysdrv(psp); + if (ret) + return ret; - ret = psp_bootloader_load_sos(psp); - if (ret) - return ret; + ret = psp_bootloader_load_sos(psp); + if (ret) + return ret; + } ret = psp_ring_create(psp, PSP_RING_TYPE__KM); if (ret) From 2ea6ab2741fc4caf9fd4a48de1b4946f09c365e0 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 9 Jun 2017 15:04:49 +0800 Subject: [PATCH 185/232] drm/amdgpu:insert TMZ_BEGIN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit FRAME_CONTROL(begin) is needed for vega10 due to ucode logic change, it can fix some CTS random fail under gfx preemption enabled mode. Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ecb9674bc8031..0a8072f73a3b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3764,6 +3764,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); } +static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); + amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ +} + static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) { uint32_t dw2 = 0; @@ -3771,6 +3777,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) if (amdgpu_sriov_vf(ring->adev)) gfx_v9_0_ring_emit_ce_meta(ring); + gfx_v9_0_ring_emit_tmz(ring, true); + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ if (flags & AMDGPU_HAVE_CTX_SWITCH) { /* set load_global_config & load_global_uconfig */ @@ -3821,12 +3829,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne ring->ring[offset] = (ring->ring_size>>2) - offset + cur; } -static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); - amdgpu_ring_write(ring, FRAME_CMD(start ? 
0 : 1)); /* frame_end */ -} - static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; From 1d4e0a8c4f12acbc0767f8f9fd75005b9125ada6 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 15:03:24 +0800 Subject: [PATCH 186/232] drm/amdgpu:hdp flush should be put it initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7ca9cbec3004f..99147f576e765 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -696,12 +696,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - /* After HDP is initialized, flush HDP.*/ - if (adev->flags & AMD_IS_APU) - nbio_v7_0_hdp_flush(adev); - else - nbio_v6_1_hdp_flush(adev); - switch (adev->asic_type) { case CHIP_RAVEN: mmhub_v1_0_initialize_power_gating(adev); @@ -724,6 +718,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); + /* After HDP is initialized, flush HDP.*/ + if (adev->flags & AMD_IS_APU) + nbio_v7_0_hdp_flush(adev); + else + nbio_v6_1_hdp_flush(adev); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; else From 6e2e216fadd80b4280783bb78e543593ebf2cb69 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 4 Jul 2017 15:43:38 +0800 Subject: [PATCH 187/232] drm/amdgpu:use formal register to trigger hdp invalidate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0a8072f73a3b4..31891149e6fcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3535,7 +3535,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { gfx_v9_0_write_data_to_reg(ring, 0, true, - SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1); + SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); } static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index fd7c72aaafa62..d5f3848af26ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -398,7 +398,7 @@ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE)); amdgpu_ring_write(ring, 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 23a85750edd6f..b8ed8faf20035 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1161,7 +1161,7 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) */ static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0)); + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0)); amdgpu_ring_write(ring, 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 21e7b88401e1e..1eb4d79d6e306 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -812,7 +812,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 */ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0)); + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0)); amdgpu_ring_write(ring, 1); } From 030308fcbd944348e5be079f4c2d53d2bda00d53 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 15:34:52 +0800 Subject: [PATCH 188/232] drm/amdgpu/sriov:fix page fault issue of driver unload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bo_free on csa is too late to put in amdgpu_fini because that time ttm is already finished, Move it earlier to avoid the page fault. Signed-off-by: Monk Liu Signed-off-by: Horace Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +--- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 506a26793ce6e..3e84ddf9e3b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1790,10 +1790,8 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = false; } - if (amdgpu_sriov_vf(adev)) { - amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); + if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, false); - } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fe603cc2c4a9b..0c4a3b8e85964 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2113,6 +2113,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_gfx_compute_mqd_sw_fini(adev); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); amdgpu_gfx_kiq_fini(adev); + amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); gfx_v8_0_mec_fini(adev); gfx_v8_0_rlc_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 31891149e6fcd..f1d7c5d0f3851 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1425,6 +1425,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_compute_mqd_sw_fini(adev); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); amdgpu_gfx_kiq_fini(adev); + amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); gfx_v9_0_mec_fini(adev); gfx_v9_0_ngg_fini(adev); From 6e132ca0bb62b30c0eb053d99f75bb827f7876f5 Mon Sep 17 00:00:00 2001 From: Horace Chen Date: Wed, 28 
Jun 2017 17:51:50 +0800 Subject: [PATCH 189/232] drm/amdgpu/sriov:increate mailbox polling timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit increase timeout to 12 seconds,because there may have multiple FLR waiting for done, the waiting time of events may be long, increase to 12s to reduce timeout failure. Signed-off-by: Horace Chen Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 1e91b9a1c5915..67e78576a9eb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -24,7 +24,7 @@ #ifndef __MXGPU_AI_H__ #define __MXGPU_AI_H__ -#define AI_MAILBOX_TIMEDOUT 5000 +#define AI_MAILBOX_TIMEDOUT 12000 enum idh_request { IDH_REQ_GPU_INIT_ACCESS = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index c791d73d2d542..f13dc6cc158f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -23,7 +23,7 @@ #ifndef __MXGPU_VI_H__ #define __MXGPU_VI_H__ -#define VI_MAILBOX_TIMEDOUT 5000 +#define VI_MAILBOX_TIMEDOUT 12000 #define VI_MAILBOX_RESET_TIME 12 /* VI mailbox messages request */ From f840cc5f8447db7efff447a25bcddbf084bd3e2e Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 16:58:08 +0800 Subject: [PATCH 190/232] drm/amdgpu/sriov:init csb for gfxv9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RLC need CSB registers initiated under SRIOV during world switch otherwise the clear state buffer behav will not be recovered to current VF scheme after switch back Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f1d7c5d0f3851..e2ae00df1d52b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2045,8 +2045,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) { int r; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + gfx_v9_0_init_csb(adev); return 0; + } gfx_v9_0_rlc_stop(adev); From eb01abc7c4fd1faa26d0787f410894d9c704eb60 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 13:40:31 +0800 Subject: [PATCH 191/232] drm/amdgpu:make ctx_add_fence interruptible(v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit otherwise a gpu hang will make application couldn't be killed under timedout=0 mode v2: Fix memoryleak job/job->s_fence issue unlock mn remove the ERROR msg after waiting being interrupted Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 ++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 12 +++++++----- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 33e1d619d060a..9cce59f6ada57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -735,8 +735,8 @@ struct amdgpu_ctx_mgr { struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, 
uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); -uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence); +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, uint64_t seq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9f1202a4182f6..c6a214f1e991e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1129,6 +1129,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; unsigned i; + uint64_t seq; + int r; amdgpu_mn_lock(p->mn); @@ -1158,10 +1160,20 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->fence_ctx = entity->fence_context; p->fence = dma_fence_get(&job->base.s_fence->finished); + r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq); + if (r) { + dma_fence_put(p->fence); + dma_fence_put(&job->base.s_fence->finished); + amdgpu_job_free(job); + amdgpu_mn_unlock(p->mn); + return r; + } + amdgpu_cs_post_dependencies(p); - cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); - job->uf_sequence = cs->out.handle; + cs->out.handle = seq; + job->uf_sequence = seq; + amdgpu_job_free_resources(job); trace_amdgpu_cs_ioctl(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a11e44340b239..75c933b1a4326 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -246,8 +246,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) return 0; } -uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence) +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct dma_fence *fence, uint64_t* handler) { struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; uint64_t seq = cring->sequence; @@ -258,9 +258,9 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, other = cring->fences[idx]; if (other) { signed long r; - r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT); if (r < 0) - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + return r; } dma_fence_get(fence); @@ -271,8 +271,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, spin_unlock(&ctx->ring_lock); dma_fence_put(other); + if (handler) + *handler = seq; - return seq; + return 0; } struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, From d59c026b7be1dd9ae00b54d1916a90775fe3bdda Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 14:35:09 +0800 Subject: [PATCH 192/232] drm/amdgpu/sriov:fix memory leak after gpu reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GPU reset will require all hw doing hw_init thus ucode_init_bo will be invoked again, which lead to memory leak skip the fw_buf allocation during sriov gpu reset to avoid memory leak. 
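[Reading note for the patch below] The shape of the fix: the BO create/reserve/pin/kmap sequence in amdgpu_ucode_init_bo() now runs only on first initialization, and the pinned GPU address and CPU mapping move from local variables into adev->firmware (fw_buf_mc / fw_buf_ptr) so that a later SR-IOV reset reuses the existing buffer instead of allocating a new one each time hw_init re-runs. A condensed sketch of the resulting flow, for illustration only (the full sequence is in the diff that follows):

	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
		/* first init (or bare metal): amdgpu_bo_create / _reserve /
		 * _pin / _kmap, with the pinned address and CPU pointer kept
		 * in adev->firmware.fw_buf_mc and adev->firmware.fw_buf_ptr */
	}
	/* on an SR-IOV reset the already-pinned buffer is simply cleared and
	 * repopulated through the stored pointers */
	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
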
Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 64 +++++++++++------------ 2 files changed, 35 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9cce59f6ada57..f085c8c9f267e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1187,6 +1187,9 @@ struct amdgpu_firmware { /* gpu info firmware data pointer */ const struct firmware *gpu_info_fw; + + void *fw_buf_ptr; + uint64_t fw_buf_mc; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index f306374ff6547..65649026b836c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -360,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, int amdgpu_ucode_init_bo(struct amdgpu_device *adev) { struct amdgpu_bo **bo = &adev->firmware.fw_buf; - uint64_t fw_mc_addr; - void *fw_buf_ptr = NULL; uint64_t fw_offset = 0; int i, err; struct amdgpu_firmware_info *ucode = NULL; @@ -372,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) return 0; } - err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, - amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, 0, bo); - if (err) { - dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); - goto failed; - } + if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) { + err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, + amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, + NULL, NULL, 0, bo); + if (err) { + dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); + goto failed; + } - err = amdgpu_bo_reserve(*bo, false); - if (err) { - dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err); - goto failed_reserve; - } + err = amdgpu_bo_reserve(*bo, false); + if (err) { + dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err); + goto failed_reserve; + } - err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, - &fw_mc_addr); - if (err) { - dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); - goto failed_pin; - } + err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, + &adev->firmware.fw_buf_mc); + if (err) { + dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); + goto failed_pin; + } - err = amdgpu_bo_kmap(*bo, &fw_buf_ptr); - if (err) { - dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err); - goto failed_kmap; - } + err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr); + if (err) { + dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err); + goto failed_kmap; + } - amdgpu_bo_unreserve(*bo); + amdgpu_bo_unreserve(*bo); + } - memset(fw_buf_ptr, 0, adev->firmware.fw_size); + memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size); /* * if SMU loaded firmware, it needn't add SMC, UVD, and VCE @@ -421,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) ucode = &adev->firmware.ucode[i]; if (ucode->fw) { header = (const struct common_firmware_header *)ucode->fw->data; - amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset, - (void *)((uint8_t *)fw_buf_ptr + fw_offset)); + amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset, + adev->firmware.fw_buf_ptr + fw_offset); if (i == AMDGPU_UCODE_ID_CP_MEC1 && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { const struct gfx_firmware_header_v1_0 *cp_hdr; cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset, - fw_buf_ptr + fw_offset); + amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset, + adev->firmware.fw_buf_ptr + fw_offset); fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); } fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE); From 6fe85429575c4d1f468e3b1715bd8643991570b1 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 20 Sep 2017 10:53:39 +0800 Subject: [PATCH 193/232] drm/amd/amgpu: update raven sdma golden setting Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index d5f3848af26ae..9206d39cb41ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -89,7 +89,7 @@ static const u32 golden_settings_sdma_vg10[] = { static const u32 golden_settings_sdma_4_1[] = { - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07, + SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000, From a49ccdbd1d70005049647ac1f8e8989c54b41e63 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 20 Sep 2017 10:55:44 +0800 Subject: [PATCH 194/232] drm/amd/amgpu: update vega10 sdma golden setting Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9206d39cb41ba..3524060f8480e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -54,7 +54,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev); static const u32 golden_settings_sdma_4[] = { - 
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07, + SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100, SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000, From 10cfafd62af4295abc73cbf4531654d4f335ff15 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 19 Sep 2017 11:29:04 -0400 Subject: [PATCH 195/232] drm/amd/amdgpu: Partial revert of iova debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We discovered that on some devices even with iommu enabled you can access all of system memory through the iommu translation. Therefore, we revert the read method to the translation only service and drop the write method completely. Signed-off-by: Tom St Denis Reviewed-by: Christan König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 90 ++++--------------------- 1 file changed, 13 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0e5f78f3a97e5..ce435dbbb398b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1784,98 +1784,34 @@ static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result, n; int r; uint64_t phys; - void *ptr; struct iommu_domain *dom; - dom = iommu_get_domain_for_dev(adev->dev); - if (!dom) - return -EFAULT; - - result = 0; - while (size) { - // get physical address and map - phys = iommu_iova_to_phys(dom, *pos); - - // copy upto one page - if (size > PAGE_SIZE) - n = PAGE_SIZE; - else - n = size; - - // to end of the page - if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE) - n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1)); - - ptr = kmap(pfn_to_page(PFN_DOWN(phys))); - if (!ptr) - return -EFAULT; - - r = copy_to_user(buf, ptr, n); - kunmap(pfn_to_page(PFN_DOWN(phys))); - if (r) - return -EFAULT; - - *pos += n; - size -= n; - result += n; - } - - return result; -} + // always return 8 bytes + if (size != 8) + return -EINVAL; -static ssize_t amdgpu_iova_to_phys_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result, n; - int r; - uint64_t phys; - void *ptr; - struct iommu_domain *dom; + // only accept page addresses + if (*pos & 0xFFF) + return -EINVAL; dom = iommu_get_domain_for_dev(adev->dev); - if (!dom) - return -EFAULT; - - result = 0; - while (size) { - // get physical address and map + if (dom) phys = iommu_iova_to_phys(dom, *pos); + else + phys = *pos; - // copy upto one page - if (size > PAGE_SIZE) - n = PAGE_SIZE; - else - n = size; - - // to end of the page - if (((*pos & (PAGE_SIZE - 1)) + n) >= PAGE_SIZE) - n = PAGE_SIZE - (*pos & (PAGE_SIZE - 1)); - - ptr = kmap(pfn_to_page(PFN_DOWN(phys))); - if (!ptr) - return -EFAULT; - - r = copy_from_user(ptr, buf, n); - kunmap(pfn_to_page(PFN_DOWN(phys))); - if (r) - return -EFAULT; - - *pos += n; - size -= n; - result += n; - } + r = copy_to_user(buf, &phys, 8); + if (r) + return -EFAULT; - return result; + return 8; } static const struct file_operations amdgpu_ttm_iova_fops = { .owner = THIS_MODULE, .read = amdgpu_iova_to_phys_read, - .write = amdgpu_iova_to_phys_write, .llseek = default_llseek }; From 
4d1f9fb721fdfa7789515167d1bd2e42cf87e12e Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 15 Sep 2017 16:33:38 -0400 Subject: [PATCH 196/232] drm/amdgpu: add cgs query info of pci bus devfn Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 +++ drivers/gpu/drm/amd/include/cgs_common.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index e521920515076..383204e911a4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -875,6 +875,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID: sys_info->value = adev->pdev->subsystem_vendor; break; + case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN: + sys_info->value = adev->pdev->devfn; + break; default: return -ENODEV; } diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 2c1f13e04726e..030b14649c4ee 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -100,6 +100,7 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_GFX_SE_INFO, CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID, CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID, + CGS_SYSTEM_INFO_PCIE_BUS_DEVFN, CGS_SYSTEM_INFO_ID_MAXIMUM, }; From 2a5b64c9fcd7adf6133e76966250ef3ab139f98b Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 15 Sep 2017 16:38:49 -0400 Subject: [PATCH 197/232] drm/amd/powerplay: add register thermal interrupt in hwmgr_hw_init Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 75 ++++++++++++++++++++- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 6 ++ 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 8770860de644b..3f7cf559c81f8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -26,8 +26,8 @@ #include #include #include +#include #include -#include "cgs_common.h" #include "power_state.h" #include "hwmgr.h" #include "pppcielanes.h" @@ -51,6 +51,75 @@ uint8_t convert_to_vid(uint16_t vddc) return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); } +static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr, + struct cgs_system_info *sys_info) +{ + sys_info->size = sizeof(struct cgs_system_info); + sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN; + + return cgs_query_system_info(hwmgr->device, sys_info); +} + +static int phm_thermal_l2h_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static int phm_thermal_h2l_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + 
PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static int phm_ctf_irq(void *private_data, + unsigned src_id, const uint32_t *iv_entry) +{ + struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data; + struct cgs_system_info sys_info = {0}; + int result; + + result = phm_get_pci_bus_devfn(hwmgr, &sys_info); + if (result) + return -EINVAL; + + pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n", + PCI_BUS_NUM(sys_info.value), + PCI_SLOT(sys_info.value), + PCI_FUNC(sys_info.value)); + return 0; +} + +static const struct cgs_irq_src_funcs thermal_irq_src[3] = { + {NULL, phm_thermal_l2h_irq}, + {NULL, phm_thermal_h2l_irq}, + {NULL, phm_ctf_irq} +}; + int hwmgr_early_init(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; @@ -179,6 +248,10 @@ int hwmgr_hw_init(struct pp_instance *handle) if (ret) goto err2; + ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src); + if (ret) + goto err2; + return 0; err2: if (hwmgr->hwmgr_func->backend_fini) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index f4b6f0ebda754..fa83e69ba9e13 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -32,6 +32,7 @@ #include "ppatomctrl.h" #include "hwmgr_ppt.h" #include "power_state.h" +#include "cgs_linux.h" struct pp_instance; struct pp_hwmgr; @@ -746,6 +747,11 @@ struct pp_hwmgr { bool en_umd_pstate; }; +struct cgs_irq_src_funcs { + cgs_irq_source_set_func_t set; + cgs_irq_handler_func_t handler; +}; + extern int hwmgr_early_init(struct pp_instance *handle); extern int hwmgr_hw_init(struct pp_instance *handle); extern int hwmgr_hw_fini(struct pp_instance *handle); From a1665a55c87eb711d23806a56e39e8116f1f4242 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 15 Sep 2017 16:43:38 -0400 Subject: [PATCH 198/232] drm/amd/powerplay: implement register thermal interrupt for Vega10 Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index bd20d551e7198..439cb371c56ab 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4994,6 +4994,38 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return 0; } +static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *info) +{ + struct cgs_irq_src_funcs *irq_src = + (struct cgs_irq_src_funcs *)info; + + if (hwmgr->thermal_controller.ucType == + ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 || + hwmgr->thermal_controller.ucType == + ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0xf, /* AMDGPU_IH_CLIENTID_THM */ + 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr), + "Failed to register high thermal interrupt!", + return -EINVAL); + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0xf, /* AMDGPU_IH_CLIENTID_THM */ + 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr), + "Failed to register low thermal interrupt!", + return -EINVAL); + } + + /* Register CTF(GPIO_19) interrupt */ + PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device, + 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */ + 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr), + "Failed to register CTF thermal interrupt!", + return -EINVAL); + + 
return 0; +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -5047,6 +5079,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_mclk_od = vega10_get_mclk_od, .set_mclk_od = vega10_set_mclk_od, .avfs_control = vega10_avfs_enable, + .register_internal_thermal_interrupt = vega10_register_thermal_interrupt, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) From fafa3598401469c22910cf74df6de6c5b318a482 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Tue, 19 Sep 2017 13:32:10 -0400 Subject: [PATCH 199/232] drm/amd/powerplay: change alert temperature range Change to more meaningful range that triggers thermal interrupts. Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 7462f9562b890..f31d2cf91f1aa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -27,8 +27,8 @@ #include "power_state.h" -#define TEMP_RANGE_MIN (90 * 1000) -#define TEMP_RANGE_MAX (120 * 1000) +#define TEMP_RANGE_MIN (0) +#define TEMP_RANGE_MAX (80 * 1000) #define PHM_FUNC_CHECK(hw) \ do { \ From 5c58301856e69223b85546fac319363eef1ac1b9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 20 Sep 2017 16:25:40 +0800 Subject: [PATCH 200/232] drm/amd/amdgpu: add vega10/raven mmhub/athub golden settings Signed-off-by: Evan Quan Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 99147f576e765..621699331e090 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -32,6 +32,8 @@ #include "vega10/DC/dce_12_0_offset.h" #include "vega10/DC/dce_12_0_sh_mask.h" #include "vega10/vega10_enum.h" +#include "vega10/MMHUB/mmhub_1_0_offset.h" +#include "vega10/ATHUB/athub_1_0_offset.h" #include "soc15_common.h" @@ -71,6 +73,18 @@ static const u32 golden_settings_vega10_hdp[] = 0xf6e, 0x0fffffff, 0x00000000, }; +static const u32 golden_settings_mmhub_1_0_0[] = +{ + SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa, + SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565 +}; + +static const u32 golden_settings_athub_1_0_0[] = +{ + SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800, + SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008 +}; + static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -665,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA10: + amdgpu_program_register_sequence(adev, + golden_settings_mmhub_1_0_0, + (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0)); + amdgpu_program_register_sequence(adev, + golden_settings_athub_1_0_0, + (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0)); break; case CHIP_RAVEN: + amdgpu_program_register_sequence(adev, + golden_settings_athub_1_0_0, + (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0)); break; default: break; From 6042e8560ef448aa1a9ca16473f0a5a264462f41 Mon Sep 17 00:00:00 2001 
From: Rex Zhu Date: Thu, 21 Sep 2017 10:34:48 +0800 Subject: [PATCH 201/232] drm/amd/powerplay: refine phm_register_thermal_interrupt interface currently, not all asics implement this callback function so not return error to avoid powerplay initialize failed in those asices Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index f31d2cf91f1aa..623cff90233d4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -210,10 +210,10 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info) { PHM_FUNC_CHECK(hwmgr); - if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL) - return -EINVAL; + if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL) + return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info); - return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info); + return 0; } /** From fd4495e57cd2594098493a0e5b6beccae8cbf47b Mon Sep 17 00:00:00 2001 From: "Xiangliang.Yu" Date: Thu, 21 Sep 2017 10:19:49 +0800 Subject: [PATCH 202/232] drm/amdgpu: Fix driver reloading failure SRIOV doesn't implement PMC capability of PCIe, so it can't update power state by reading PMC register. Currently, amdgpu driver doesn't disable pci device when removing driver, the enable_cnt of pci device will not be decrease to 0. When reloading driver, pci_enable_device will do nothing as enable_cnt is not zero. And power state will not be updated as PMC is not support. So current_state of pci device is not D0 state and pci_enable_msi return fail. Add pci_disable_device when remmoving driver to fix the issue. Signed-off-by: Xiangliang.Yu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index dee35a9c57235..91e42b60d66b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -609,6 +609,8 @@ amdgpu_pci_remove(struct pci_dev *pdev) drm_dev_unregister(dev); drm_dev_unref(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); } static void From a8ffeac96daa66132d44a624c1cf3cbcc2598a34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 18 Sep 2017 14:32:38 +0200 Subject: [PATCH 203/232] drm/amdgpu: use 2MB fragment size for GFX6,7 and 8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use 2MB fragment size by default for older hardware generations as well. 
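[Reading note for the patch below] The link between the "2MB" in the subject and the value 9 in the diff: the third argument of amdgpu_vm_adjust_size() is read here as a power-of-two exponent in units of 4 KiB GPUVM pages (an assumption inferred from the subject line and the "as well", i.e. matching what newer generations already use; it is not spelled out in the patch itself):

	/* Illustrative arithmetic only, assuming 4 KiB GPUVM pages:
	 *   old: (1 << 4) * 4 KiB =   64 KiB per fragment
	 *   new: (1 << 9) * 4 KiB = 2048 KiB = 2 MiB per fragment
	 */
	amdgpu_vm_adjust_size(adev, 64, 9);
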
Signed-off-by: Christian König Acked-by: John Bridgman Reviewed-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 5be9c83dfcf7d..2d1f3f651e1f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - amdgpu_vm_adjust_size(adev, 64, 4); + amdgpu_vm_adjust_size(adev, 64, 9); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index eace9e7182c8a..2256277d102f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64, 4); + amdgpu_vm_adjust_size(adev, 64, 9); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 3b3326daf32b9..114671b57004f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64, 4); + amdgpu_vm_adjust_size(adev, 64, 9); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask From dfe5c2b76b2a32cd37283809737e55f9208f8346 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Mon, 18 Sep 2017 14:25:31 -0400 Subject: [PATCH 204/232] drm/amdgpu: Correct bytes limit for SDMA 3.0 copy and fill MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Yong Zhao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 728c0d8e849b8..4858c9974c862 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1730,11 +1730,11 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib, } static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { - .copy_max_bytes = 0x1fffff, + .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ .copy_num_dw = 7, .emit_copy_buffer = sdma_v3_0_emit_copy_buffer, - .fill_max_bytes = 0x1fffff, + .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ .fill_num_dw = 5, .emit_fill_buffer = sdma_v3_0_emit_fill_buffer, }; From 7bdc53f925af085ffa0580f10489f82b36cc2f1c Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 15 Sep 2017 18:20:37 -0400 Subject: [PATCH 205/232] drm/amdgpu: Fix a bug in amdgpu_fill_buffer() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When max_bytes is not 8 bytes aligned and bo size is larger than max_bytes, the last 8 bytes in a ttm node may be left unchanged. 
For example, on pre SDMA 4.0, max_bytes = 0x1fffff, and the bo size is 0x200000, the problem will happen. In order to fix the problem, we separately store the max nums of PTEs/PDEs a single operation can set in amdgpu_vm_pte_funcs structure, rather than inferring it from bytes limit of SDMA constant fill, i.e. fill_max_bytes. Together with the fix, we replace the hard code value "10" in amdgpu_vm_bo_update_mapping() with the corresponding values from structure amdgpu_vm_pte_funcs. Signed-off-by: Yong Zhao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 3 +++ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 3 +++ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 +++ drivers/gpu/drm/amd/amdgpu/si_dma.c | 3 +++ 8 files changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f085c8c9f267e..1bf42a8ef23e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -302,6 +302,13 @@ struct amdgpu_vm_pte_funcs { void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr); + + /* maximum nums of PTEs/PDEs in a single operation */ + uint32_t set_max_nums_pte_pde; + + /* number of dw to reserve per operation */ + unsigned set_pte_pde_num_dw; + /* for linear pte/pde updates without addr mapping */ void (*set_pte_pde)(struct amdgpu_ib *ib, uint64_t pe, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ce435dbbb398b..1086f039d8e3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1527,8 +1527,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, struct dma_fence **fence) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/ - uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + uint32_t max_bytes = 8 * + adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct drm_mm_node *mm_node; @@ -1560,8 +1560,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, ++mm_node; } - /* 10 double words for each SDMA_OP_PTEPDE cmd */ - num_dw = num_loops * 10; + /* num of dwords for each SDMA_OP_PTEPDE cmd */ + num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw; /* for IB padding */ num_dw += 64; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6c1133298b174..28d16781377f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1606,10 +1606,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, } else { /* set page commands needed */ - ndw += ncmds * 10; + ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw; /* extra commands for begin/end fragments */ - ndw += 2 * 10 * adev->vm_manager.fragment_size; + ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw + * adev->vm_manager.fragment_size; params.func = amdgpu_vm_do_set_ptes; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index f508f4d01e4a9..c64dcd1883b5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
@@ -1389,6 +1389,9 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev) static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { .copy_pte = cik_sdma_vm_copy_pte, .write_pte = cik_sdma_vm_write_pte, + + .set_max_nums_pte_pde = 0x1fffff >> 3, + .set_pte_pde_num_dw = 10, .set_pte_pde = cik_sdma_vm_set_pte_pde, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f2d0710258cb2..c05eb74d3404c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -1326,6 +1326,9 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { .copy_pte = sdma_v2_4_vm_copy_pte, .write_pte = sdma_v2_4_vm_write_pte, + + .set_max_nums_pte_pde = 0x1fffff >> 3, + .set_pte_pde_num_dw = 10, .set_pte_pde = sdma_v2_4_vm_set_pte_pde, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 4858c9974c862..2079340656d21 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1750,6 +1750,10 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { .copy_pte = sdma_v3_0_vm_copy_pte, .write_pte = sdma_v3_0_vm_write_pte, + + /* not 0x3fffff due to HW limitation */ + .set_max_nums_pte_pde = 0x3fffe0 >> 3, + .set_pte_pde_num_dw = 10, .set_pte_pde = sdma_v3_0_vm_set_pte_pde, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 3524060f8480e..2605faf56dff7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1716,6 +1716,9 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { .copy_pte = sdma_v4_0_vm_copy_pte, .write_pte = sdma_v4_0_vm_write_pte, + + .set_max_nums_pte_pde = 0x400000 >> 3, + .set_pte_pde_num_dw = 10, .set_pte_pde = sdma_v4_0_vm_set_pte_pde, }; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 112969f3301a9..adb6ae7d63ef0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -889,6 +889,9 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { .copy_pte = si_dma_vm_copy_pte, .write_pte = si_dma_vm_write_pte, + + .set_max_nums_pte_pde = 0xffff8 >> 3, + .set_pte_pde_num_dw = 9, .set_pte_pde = si_dma_vm_set_pte_pde, }; From e6d921974a51e607515b39baa8d1c3f1a27d008b Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 19 Sep 2017 12:58:15 -0400 Subject: [PATCH 206/232] drm/amdgpu: Add copy_pte_num_dw member in amdgpu_vm_pte_funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use it to replace the hard coded value in amdgpu_vm_bo_update_mapping(). 
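As a rough illustration with the per-ASIC values added below: for, say, ncmds = 3 GART copy commands, amdgpu_vm_bo_update_mapping() now reserves 3 * 7 = 21 dwords on the CIK/VI/Vega SDMA engines but only 3 * 5 = 15 dwords on SI DMA, instead of always assuming 7 dwords per copy command.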
Signed-off-by: Yong Zhao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/si_dma.c | 2 ++ 7 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1bf42a8ef23e2..7c43add4e4444 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -294,10 +294,14 @@ struct amdgpu_buffer_funcs { /* provided by hw blocks that can write ptes, e.g., sdma */ struct amdgpu_vm_pte_funcs { + /* number of dw to reserve per operation */ + unsigned copy_pte_num_dw; + /* copy pte entries from GART */ void (*copy_pte)(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count); + /* write pte one entry at a time with addr mapping */ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 28d16781377f6..8fcc743dfa867 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1597,7 +1597,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (pages_addr) { /* copy commands needed */ - ndw += ncmds * 7; + ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw; /* and also PTEs */ ndw += nptes * 2; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c64dcd1883b5d..60cecd117705b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -1387,7 +1387,9 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { + .copy_pte_num_dw = 7, .copy_pte = cik_sdma_vm_copy_pte, + .write_pte = cik_sdma_vm_write_pte, .set_max_nums_pte_pde = 0x1fffff >> 3, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index c05eb74d3404c..acdee3a4602c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -1324,7 +1324,9 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { + .copy_pte_num_dw = 7, .copy_pte = sdma_v2_4_vm_copy_pte, + .write_pte = sdma_v2_4_vm_write_pte, .set_max_nums_pte_pde = 0x1fffff >> 3, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 2079340656d21..72f31cc7df00e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1748,7 +1748,9 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { + .copy_pte_num_dw = 7, .copy_pte = sdma_v3_0_vm_copy_pte, + .write_pte = sdma_v3_0_vm_write_pte, /* not 0x3fffff due to HW limitation */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2605faf56dff7..61572c5d19efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1714,7 +1714,9 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = 
{ + .copy_pte_num_dw = 7, .copy_pte = sdma_v4_0_vm_copy_pte, + .write_pte = sdma_v4_0_vm_write_pte, .set_max_nums_pte_pde = 0x400000 >> 3, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index adb6ae7d63ef0..3fa2fbf8c9a18 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -887,7 +887,9 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { + .copy_pte_num_dw = 5, .copy_pte = si_dma_vm_copy_pte, + .write_pte = si_dma_vm_write_pte, .set_max_nums_pte_pde = 0xffff8 >> 3, From 4bd9a67e17b9a2c1b0ca55e7dfc5a711c161373d Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 4 Jul 2017 16:40:58 +0800 Subject: [PATCH 207/232] drm/amdgpu:halt when vm fault MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit only with this way we can debug the VMC page fault issue Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +++++++ 2 files changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6c8040e616c4e..c17996e18086f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 7ff70762cfc82..cc21c4bdec275 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp); } From 85f95ad629558b65ab27cce583c683fb9e3da35c Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 21 Sep 2017 14:59:40 +0800 Subject: [PATCH 208/232] drm/amdgpu:unmap KCQ in gfx hw_fini(v2) v2: move kcq_disable out of SRIOV, make it genearal Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 57 ++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 58 +++++++++++++++++++++++++++ 2 files changed, 115 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0c4a3b8e85964..dfc10b1baea0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5034,12 +5034,69 @@ static int gfx_v8_0_hw_init(void *handle) return r; } +static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) +{ + struct amdgpu_device 
*adev = kiq_ring->adev; + uint32_t scratch, tmp = 0; + int r, i; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("Failed to get scratch reg (%d).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + + r = amdgpu_ring_alloc(kiq_ring, 10); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + + /* unmap queues */ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + /* write to scratch for completion */ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); + amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(kiq_ring, 0xDEADBEEF); + amdgpu_ring_commit(kiq_ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i >= adev->usec_timeout) { + DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); + r = -EINVAL; + } + amdgpu_gfx_scratch_free(adev, scratch); + return r; +} + static int gfx_v8_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + + /* disable KCQ to avoid CPC touch memory not valid anymore */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) + gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]); + if (amdgpu_sriov_vf(adev)) { pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e2ae00df1d52b..9945218a54894 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2895,12 +2895,70 @@ static int gfx_v9_0_hw_init(void *handle) return r; } +static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint32_t scratch, tmp = 0; + int r, i; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("Failed to get scratch reg (%d).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + + r = amdgpu_ring_alloc(kiq_ring, 10); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + + /* unmap queues */ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + /* write to scratch for completion */ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); + amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(kiq_ring, 0xDEADBEEF); + 
amdgpu_ring_commit(kiq_ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i >= adev->usec_timeout) { + DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); + r = -EINVAL; + } + amdgpu_gfx_scratch_free(adev, scratch); + return r; +} + + static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + + /* disable KCQ to avoid CPC touch memory not valid anymore */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) + gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]); + if (amdgpu_sriov_vf(adev)) { pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; From beb841028480a13a664f868688102f3cee762a6b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 21 Sep 2017 15:10:06 +0800 Subject: [PATCH 209/232] drm/amdgpu/sriov:alloc KIQ MQD in VRAM(v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this way after KIQ MQD released in drv unloading, CPC can still let KIQ access this MQD thus RLCV SAVE_VF will not fail v2: always use VRAM domain for KIQ MQD no matter BM or SRIOV Signed-off-by: Monk Liu Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 4f6c68fc1dd91..4fcd98e65998c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -260,8 +260,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev, /* create MQD for KIQ */ ring = &adev->gfx.kiq.ring; if (!ring->mqd_obj) { + /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must + * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD + * deallocated and gart_unbind, to strict diverage we decide to use VRAM domain for + * KIQ MQD no matter SRIOV or Bare-metal + */ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj, &ring->mqd_gpu_addr, &ring->mqd_ptr); if (r) { dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); From 4ff184d70e1dd85cc39f3b8ed0d98b728d6d9b6c Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 15 Sep 2017 16:43:01 +0800 Subject: [PATCH 210/232] drm/amdgpu:fix uvd ring fini routine(v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix missing finish uvd enc_ring. 
v2: since the adev pointer check in already in ring_fini so drop the check outsider Signed-off-by: Monk Liu Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 331e34ac61fda..e8bd50cf97857 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { + int i; kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); @@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->uvd.ring); + for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) + amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + release_firmware(adev->uvd.fw); return 0; From c833d8aa4d829e858f1be8f4bd82a1503b611013 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 19 Sep 2017 16:09:53 +0800 Subject: [PATCH 211/232] drm/amdgpu:fix firmware memoryleak(v2) this fix memory leak due to request_firmware after driver unloaded v2: release gmc firmware for gmc6/7/8 as well Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 +++++ 6 files changed, 37 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e02828665b14a..447d446b50150 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -92,6 +92,12 @@ static int psp_sw_init(void *handle) static int psp_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9945218a54894..deeaee1457efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -384,6 +384,25 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) return r; } + +static void gfx_v9_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + + kfree(adev->gfx.rlc.register_list_format); +} + static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -1429,6 +1448,7 @@ static int gfx_v9_0_sw_fini(void *handle) gfx_v9_0_mec_fini(adev); gfx_v9_0_ngg_fini(adev); + gfx_v9_0_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 2d1f3f651e1f5..f4603a7c8ef32 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -901,6 +901,8 @@ static 
int gmc_v6_0_sw_fini(void *handle) gmc_v6_0_gart_fini(adev); amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 2256277d102f3..b0528ca9207b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle) gmc_v7_0_gart_fini(adev); amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 114671b57004f..f368cfe2f5851 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle) gmc_v8_0_gart_fini(adev); amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 61572c5d19efd..c26d205ff3bf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1264,6 +1264,11 @@ static int sdma_v4_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } + return 0; } From e9c7577c09b5062a4f1464b8ef4472c14bdfd2e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 11 Sep 2017 17:29:26 +0200 Subject: [PATCH 212/232] drm/amdgpu: simplify pinning into visible VRAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just set the CPU access required flag when we pin it. 
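The simplification relies on amdgpu_ttm_placement_from_domain() already restricting VRAM placements to the CPU-visible window when AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED is set, so the pin path only has to apply the caller's min/max offsets instead of open-coding the visible-VRAM clamp for each placement.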
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 23 ++++++++-------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 726a662f43f49..6982baeccd149 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -635,7 +635,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r, i; - unsigned fpfn, lpfn; if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; @@ -667,22 +666,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + /* force to pin into visible video ram */ + if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; amdgpu_ttm_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { - /* force to pin into visible video ram */ - if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && - !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && - (!max_offset || max_offset > - adev->mc.visible_vram_size)) { - if (WARN_ON_ONCE(min_offset > - adev->mc.visible_vram_size)) - return -EINVAL; - fpfn = min_offset >> PAGE_SHIFT; - lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - } else { - fpfn = min_offset >> PAGE_SHIFT; - lpfn = max_offset >> PAGE_SHIFT; - } + unsigned fpfn, lpfn; + + fpfn = min_offset >> PAGE_SHIFT; + lpfn = max_offset >> PAGE_SHIFT; + if (fpfn > bo->placements[i].fpfn) bo->placements[i].fpfn = fpfn; if (!bo->placements[i].lpfn || From d3f8c0abf45866d0e474181e147594d3da15834d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 11:22:56 +0800 Subject: [PATCH 213/232] drm/amd/powerplay: refine interface in struct pp_smumgr_func unify to use struct hwmgr as function parameter in smumgr. 
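At the call sites this is the mechanical change visible throughout the hunks below, e.g.:

    smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel);

becomes

    smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);

so the hwmgr code no longer reaches through hwmgr->smumgr, and the smumgr implementations take the struct pp_hwmgr they actually operate on.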
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 12 +- .../amd/powerplay/hwmgr/cz_clockpowergating.c | 8 +- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 90 +++--- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 2 +- .../gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 26 +- .../powerplay/hwmgr/smu7_clockpowergating.c | 60 ++-- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 137 ++++---- .../drm/amd/powerplay/hwmgr/smu7_powertune.c | 24 +- .../drm/amd/powerplay/hwmgr/smu7_thermal.c | 12 +- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 148 ++++----- .../amd/powerplay/hwmgr/vega10_powertune.c | 18 +- .../drm/amd/powerplay/hwmgr/vega10_thermal.c | 16 +- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 110 +++---- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 140 +++++---- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h | 4 +- .../gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 14 +- .../gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 292 +++++++++--------- .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 59 ++-- .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 130 ++++---- .../drm/amd/powerplay/smumgr/iceland_smc.c | 57 ++-- .../drm/amd/powerplay/smumgr/iceland_smumgr.c | 76 ++--- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 97 +++--- .../amd/powerplay/smumgr/polaris10_smumgr.c | 122 ++++---- .../gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 130 ++++---- .../gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | 8 +- .../drm/amd/powerplay/smumgr/smu7_smumgr.c | 212 ++++++------- .../drm/amd/powerplay/smumgr/smu7_smumgr.h | 36 +-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 89 +++--- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 70 ++--- .../drm/amd/powerplay/smumgr/tonga_smumgr.c | 64 ++-- .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 194 ++++++------ .../drm/amd/powerplay/smumgr/vega10_smumgr.h | 16 +- 32 files changed, 1225 insertions(+), 1248 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 2634d792404af..c37ea9543ca38 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -83,7 +83,7 @@ static int pp_sw_init(void *handle) if (smumgr->smumgr_funcs->smu_init == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_init(smumgr); + ret = smumgr->smumgr_funcs->smu_init(pp_handle->hwmgr); pr_info("amdgpu: powerplay sw initialized\n"); } @@ -103,7 +103,7 @@ static int pp_sw_fini(void *handle) if (smumgr->smumgr_funcs->smu_fini == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_fini(smumgr); + ret = smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); } return ret; } @@ -122,9 +122,9 @@ static int pp_hw_init(void *handle) if (smumgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - if(smumgr->smumgr_funcs->start_smu(smumgr)) { + if(smumgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(smumgr); + smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return -EINVAL;; } if (ret == PP_DPM_DISABLED) @@ -246,10 +246,10 @@ static int pp_resume(void *handle) if (smumgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->start_smu(smumgr); + ret = smumgr->smumgr_funcs->start_smu(pp_handle->hwmgr); if (ret) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(smumgr); + smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 576b61eb6b8f7..44de0874629fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -113,12 +113,12 @@ int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) PHM_PlatformCaps_UVDDPM)) { cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled; dpm_features |= UVD_DPM_MASK; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); } else { dpm_features |= UVD_DPM_MASK; cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); } return 0; @@ -134,12 +134,12 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) PHM_PlatformCaps_VCEDPM)) { cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled; dpm_features |= VCE_DPM_MASK; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); } else { dpm_features |= VCE_DPM_MASK; cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 045fdb3da056d..73bb99d62a44a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -162,8 +162,8 @@ static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr) struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); if (cz_hwmgr->max_sclk_level == 0) { - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel); - cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1; + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); + cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1; } return cz_hwmgr->max_sclk_level; @@ -462,7 +462,7 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) if (!hwmgr->need_pp_table_upload) return 0; - ret = smum_download_powerplay_table(hwmgr->smumgr, &table); + ret = smum_download_powerplay_table(hwmgr, &table); PP_ASSERT_WITH_CODE((0 == ret && NULL != table), "Fail to get clock table from SMU!", return -EINVAL;); @@ -554,7 +554,7 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr) (uint8_t)dividers.pll_post_divider; } - ret = smum_upload_powerplay_table(hwmgr->smumgr); + ret = smum_upload_powerplay_table(hwmgr); return ret; } @@ -598,8 +598,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr) cz_hwmgr->uvd_dpm.soft_min_clk = 0; cz_hwmgr->uvd_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].vclk; @@ -625,8 +625,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.soft_min_clk = 0; cz_hwmgr->vce_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].ecclk; @@ -652,8 +652,8 @@ static int cz_init_acp_limit(struct 
pp_hwmgr *hwmgr) cz_hwmgr->acp_dpm.soft_min_clk = 0; cz_hwmgr->acp_dpm.hard_min_clk = 0; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel); - level = smum_get_argument(hwmgr->smumgr); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); + level = smum_get_argument(hwmgr); if (level < table->count) clock = table->entries[level].acpclk; @@ -709,7 +709,7 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { cz_hwmgr->sclk_dpm.hard_min_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkHardMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.hard_min_clk, @@ -735,7 +735,7 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) { cz_hwmgr->sclk_dpm.soft_min_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, @@ -746,7 +746,7 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_StablePState) && cz_hwmgr->sclk_dpm.soft_max_clk != clock) { cz_hwmgr->sclk_dpm.soft_max_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -766,7 +766,7 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepSclk, clks); } @@ -779,7 +779,7 @@ static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr) struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWatermarkFrequency, cz_hwmgr->sclk_dpm.soft_max_clk); @@ -794,13 +794,13 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b if (enable) { PP_DBG_LOG("enable Low Memory PState.\n"); - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableLowMemoryPstate, (lock ? 1 : 0)); } else { PP_DBG_LOG("disable Low Memory PState.\n"); - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableLowMemoryPstate, (lock ? 
1 : 0)); } @@ -820,7 +820,7 @@ static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr) cz_nbdpm_pstate_enable_disable(hwmgr, true, true); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); if (ret == 0) @@ -841,7 +841,7 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr) PP_DBG_LOG("enabling ALL SMU features.\n"); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); if (ret == 0) @@ -968,7 +968,7 @@ static int cz_start_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled; dpm_features |= SCLK_DPM_MASK; - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, dpm_features); @@ -984,7 +984,7 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr) if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) { dpm_features |= SCLK_DPM_MASK; cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled; - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, dpm_features); } @@ -998,13 +998,13 @@ static int cz_program_bootup_state(struct pp_hwmgr *hwmgr) cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock; cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1026,9 +1026,9 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr, int result; unsigned long features; - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); if (result == 0) { - features = smum_get_argument(hwmgr->smumgr); + features = smum_get_argument(hwmgr); if (features & check_feature) return true; } @@ -1178,13 +1178,13 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1216,13 +1216,13 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) cz_hwmgr->sclk_dpm.soft_max_clk = clock; cz_hwmgr->sclk_dpm.hard_max_clk = clock; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMin)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk, @@ -1235,13 +1235,13 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - 
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, PPSMC_MSG_SetSclkSoftMax)); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, cz_get_sclk_level(hwmgr, cz_hwmgr->sclk_dpm.soft_min_clk, @@ -1281,7 +1281,7 @@ int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); return 0; } @@ -1293,11 +1293,11 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDynamicPowerGating)) { return smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_UVDPowerON, 1); } else { return smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_UVDPowerON, 0); } } @@ -1319,7 +1319,7 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) cz_hwmgr->uvd_dpm.hard_min_clk = ptable->entries[ptable->count - 1].vclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUvdHardMin, cz_get_uvd_level(hwmgr, cz_hwmgr->uvd_dpm.hard_min_clk, @@ -1349,7 +1349,7 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk = ptable->entries[ptable->count - 1].ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, cz_get_eclk_level(hwmgr, cz_hwmgr->vce_dpm.hard_min_clk, @@ -1357,15 +1357,15 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) } else { /*Program HardMin based on the vce_arbiter.ecclk */ if (hwmgr->vce_arbiter.ecclk == 0) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, 0); /* disable ECLK DPM 0. 
Otherwise VCE could hang if * switching SCLK from DPM 0 to 6/7 */ - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkSoftMin, 1); } else { cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetEclkHardMin, cz_get_eclk_level(hwmgr, cz_hwmgr->vce_dpm.hard_min_clk, @@ -1379,7 +1379,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerOFF); return 0; } @@ -1388,7 +1388,7 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerON); return 0; } @@ -1538,7 +1538,7 @@ static void cz_hw_print_display_cfg( PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n", data); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplaySizePowerParams, data); } @@ -1603,10 +1603,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMin, mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSclkSoftMax, mask); break; @@ -1848,7 +1848,7 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); + result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity); if (0 == result) { activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); activity_percent = activity_percent > 100 ? 
100 : activity_percent; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 3f7cf559c81f8..16101c392c3b9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -818,7 +818,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) for (i = 0; i < vddc_table->count; i++) { if (req_vddc <= vddc_table->entries[i].vddc) { req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VddC_Request, req_volt); return; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 603035a5a4dbd..9186b0788fac8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c @@ -174,33 +174,33 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100; rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinVcn, (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min); } if((hwmgr->gfx_arbiter.sclk_hard_min != 0) && ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, hwmgr->gfx_arbiter.sclk_hard_min / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq); } if ((hwmgr->gfx_arbiter.gfxclk != 0) && (rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinVideoGfxclkFreq, hwmgr->gfx_arbiter.gfxclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->gfx_actual_soft_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq); } if ((hwmgr->gfx_arbiter.fclk != 0) && (rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinVideoFclkFreq, hwmgr->gfx_arbiter.fclk / 100); - rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->fabric_actual_soft_min_freq); + rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq); } return 0; @@ -212,7 +212,7 @@ static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) { rv_data->deep_sleep_dcefclk = clock/100; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, rv_data->deep_sleep_dcefclk); } @@ -225,7 +225,7 @@ static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) if (rv_data->num_active_display != count) { rv_data->num_active_display = count; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDisplayCount, rv_data->num_active_display); } @@ -277,7 +277,7 @@ static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr) struct rv_hwmgr *rv_data = (struct rv_hwmgr 
*)(hwmgr->backend); if (rv_data->gfx_off_controled_by_driver) - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); return 0; @@ -293,7 +293,7 @@ static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr) struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend); if (rv_data->gfx_off_controled_by_driver) - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff); return 0; @@ -383,7 +383,7 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr) DpmClocks_t *table = &(rv_data->clock_table); struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info); - result = rv_copy_table_from_smc(hwmgr->smumgr, (uint8_t *)table, CLOCKTABLE); + result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE); PP_ASSERT_WITH_CODE((0 == result), "Attempt to copy clock table from smc failed", @@ -799,7 +799,7 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr, return -EINVAL; } - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, + result = smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); return result; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 48f60dedac2bf..69a0678ace98b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -27,21 +27,21 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); } static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); } static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) { - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + return smum_send_msg_to_smc(hwmgr, enable ? 
PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); } @@ -70,7 +70,7 @@ static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); return 0; } @@ -80,10 +80,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) if (phm_cf_want_uvd_power_gating(hwmgr)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDynamicPowerGating)) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDPowerON, 1); } else { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDPowerON, 0); } } @@ -94,7 +94,7 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerOFF); return 0; } @@ -102,7 +102,7 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_VCEPowerON); return 0; } @@ -111,7 +111,7 @@ static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SAMPowerOFF); return 0; } @@ -120,7 +120,7 @@ static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SAMPowerON); return 0; } @@ -235,7 +235,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -245,7 +245,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -258,7 +258,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -269,7 +269,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_3DLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -282,7 +282,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_RLC_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -295,7 +295,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_GFX_CP_LS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -309,7 +309,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, CG_GFX_OTHERS_MGCG_MASK); if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } 
break; @@ -329,7 +329,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -339,7 +339,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_BIF_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -352,7 +352,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_MC_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -363,7 +363,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_MC_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -376,7 +376,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -386,7 +386,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_DRM_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -399,7 +399,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -410,7 +410,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_HDP_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -423,7 +423,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGCG_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } @@ -434,7 +434,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_SDMA_MGLS_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -447,7 +447,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, value = CG_SYS_ROM_MASK; if (smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, msg, value)) + hwmgr, msg, value)) return -EINVAL; } break; @@ -487,9 +487,9 @@ int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) active_cus = sys_info.value; if (enable) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus); else - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GFX_CU_PG_DISABLE); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 4c603e53a171a..bc2f227559cdf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -164,7 +164,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) { if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); return 0; } @@ -301,28 +301,28 @@ static int 
smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); } - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); PP_ASSERT_WITH_CODE( (data->vddc_voltage_table.count <= tmp), "Too many voltage values for VDDC. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddc_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); PP_ASSERT_WITH_CODE( (data->vddgfx_voltage_table.count <= tmp), "Too many voltage values for VDDC. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddgfx_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI); PP_ASSERT_WITH_CODE( (data->vddci_voltage_table.count <= tmp), "Too many voltage values for VDDCI. Trimming to fit state table.", phm_trim_voltage_table_to_fit_state_table(tmp, &(data->vddci_voltage_table))); - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD); PP_ASSERT_WITH_CODE( (data->mvdd_voltage_table.count <= tmp), "Too many voltage values for MVDD. Trimming to fit state table.", @@ -471,7 +471,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) { - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults); } /** @@ -529,7 +529,7 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) data->pcie_gen_performance = data->pcie_gen_power_saving; data->pcie_lane_performance = data->pcie_lane_power_saving; } - tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK); + tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK); phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, tmp, MAX_REGULAR_DPM_NUMBER); @@ -610,27 +610,27 @@ static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) phm_reset_single_dpm_table( &data->dpm_table.sclk_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.mclk_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.vddc_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.vddci_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); phm_reset_single_dpm_table( &data->dpm_table.mvdd_table, - smum_get_mac_definition(hwmgr->smumgr, + smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD), MAX_REGULAR_DPM_NUMBER); return 0; @@ -840,7 +840,7 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableVRHotGPIOInterrupt); return 0; @@ -858,7 +858,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); if (data->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV); return 0; } @@ -868,7 +868,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV); return 0; } @@ -877,12 +877,12 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON)) PP_ASSERT_WITH_CODE(false, "Attempt to enable Master Deep Sleep switch failed!", return -EINVAL); } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", @@ -897,7 +897,7 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF)) { PP_ASSERT_WITH_CODE(false, "Attempt to disable Master Deep Sleep switch failed!", @@ -913,12 +913,12 @@ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t soft_register_value = 0; uint32_t handshake_disables_offset = data->soft_regs_start - + smum_get_offsetof(hwmgr->smumgr, + + smum_get_offsetof(hwmgr, SMU_SoftRegisters, HandshakeDisables); soft_register_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, handshake_disables_offset); - soft_register_value |= smum_get_mac_definition(hwmgr->smumgr, + soft_register_value |= smum_get_mac_definition(hwmgr, SMU_UVD_MCLK_HANDSHAKE_DISABLE); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, handshake_disables_offset, soft_register_value); @@ -932,7 +932,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) /* enable SCLK dpm */ if (!data->sclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), "Failed to enable SCLK DPM during DPM Start Function!", return -EINVAL); @@ -941,7 +941,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) smu7_disable_handshake_uvd(hwmgr); PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Enable)), "Failed to enable MCLK DPM during DPM Start Function!", return -EINVAL); @@ -989,7 +989,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters, + smum_get_offsetof(hwmgr, SMU_SoftRegisters, VoltageChangeTimeout), 0x1000); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); @@ -1006,7 +1006,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) /* enable PCIE dpm */ if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, + (0 == smum_send_msg_to_smc(hwmgr, 
PPSMC_MSG_PCIeDPM_Enable)), "Failed to enable pcie DPM during DPM Start Function!", return -EINVAL); @@ -1014,7 +1014,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Falcon_QuickTransition)) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableACDCGPIOInterrupt)), "Failed to enable AC DC GPIO Interrupt!", ); @@ -1032,7 +1032,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable SCLK DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable); } /* disable MCLK dpm */ @@ -1040,7 +1040,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to disable MCLK DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable); } return 0; @@ -1060,7 +1060,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) /* disable PCIE dpm */ if (!data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, + (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_Disable) == 0), "Failed to disable pcie DPM during DPM Stop Function!", return -EINVAL); @@ -1072,7 +1072,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) "Trying to disable voltage DPM when DPM is disabled", return 0); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable); return 0; } @@ -1226,7 +1226,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay); + smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); tmp_result = smu7_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), @@ -1692,7 +1692,7 @@ static int phm_add_voltage(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((0 != look_up_table->count), "Lookup Table empty.", return -EINVAL); - i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); PP_ASSERT_WITH_CODE((i >= look_up_table->count), "Lookup Table is full.", return -EINVAL); @@ -2423,7 +2423,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, level); } } @@ -2436,7 +2436,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, (1 << level)); } @@ -2450,7 +2450,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) level++; if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, (1 << level)); } @@ -2469,14 +2469,14 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (!data->sclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.sclk_dpm_enable_mask); } if (!data->mclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.mclk_dpm_enable_mask); } @@ -2492,7 +2492,7 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) return -EINVAL; if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); } @@ -2509,7 +2509,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, (1 << level)); @@ -2519,7 +2519,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, (1 << level)); } @@ -2529,7 +2529,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { level = phm_get_lowest_enabled_level(hwmgr, data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, (level)); } @@ -3005,7 +3005,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, [smu7_power_state->performance_level_count++]); PP_ASSERT_WITH_CODE( - (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), "Performance levels exceeds SMC limit!", return -EINVAL); @@ -3169,7 +3169,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, data->highest_mclk = memory_clock; PP_ASSERT_WITH_CODE( - (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), "Performance levels exceeds SMC limit!", return -EINVAL); @@ -3315,14 +3315,14 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, struct pp_gpu_power *query) { - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart), "Failed to start pm status log!", return -1); msleep_interruptible(20); - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample), "Failed to sample pm status log!", return -1); @@ -3356,19 +3356,19 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); *((uint32_t *)value) = sclk; *size = 4; return 0; 
case AMDGPU_PP_SENSOR_GFX_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); *((uint32_t *)value) = mclk; *size = 4; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: - offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + offset = data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, AverageGraphicsActivity); @@ -3535,7 +3535,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze SCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); @@ -3547,7 +3547,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to freeze MCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", return -EINVAL); @@ -3765,7 +3765,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze SCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); @@ -3777,7 +3777,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), "Trying to Unfreeze MCLK DPM when DPM is disabled", ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); @@ -3828,9 +3828,9 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) int ret = 0; if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); - ret = (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; + ret = (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; } return ret; } @@ -3905,7 +3905,7 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); } @@ -3914,7 +3914,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) { PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; + return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 
0 : -1; } static int @@ -3977,12 +3977,12 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, PreVBlankGap), 0x64); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); @@ -4007,7 +4007,7 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f hwmgr->thermal_controller. advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); } @@ -4257,13 +4257,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); break; case PP_MCLK: if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); break; @@ -4276,7 +4276,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, level++; if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PCIeDPM_ForceLevel, level); break; @@ -4300,7 +4300,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); for (i = 0; i < sclk_table->count; i++) { @@ -4316,7 +4316,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? "*" : ""); break; case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); for (i = 0; i < mclk_table->count; i++) { @@ -4602,7 +4602,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, if (sclk_mask) { if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SCLKDPM_SetEnabledMask, data->dpm_level_enable_mask. sclk_dpm_enable_mask & @@ -4611,7 +4611,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, if (mclk_mask) { if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_MCLKDPM_SetEnabledMask, data->dpm_level_enable_mask. 
mclk_dpm_enable_mask & @@ -4623,8 +4623,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) { - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); if (smu_data == NULL) return -EINVAL; @@ -4636,13 +4635,13 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr->smumgr, PPSMC_MSG_EnableAvfs), + hwmgr, PPSMC_MSG_EnableAvfs), "Failed to enable AVFS!", return -EINVAL); } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( - hwmgr->smumgr, PPSMC_MSG_DisableAvfs), + hwmgr, PPSMC_MSG_DisableAvfs), "Failed to disable AVFS!", return -EINVAL); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 0fbaeb19a542a..a20d67a78b70e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -660,7 +660,7 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) didt_block |= block_en << TCP_Enable_SHIFT; if (enable) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block); return result; } @@ -781,7 +781,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); if (hwmgr->chip_id == CHIP_POLARIS11) { - result = smum_send_msg_to_smc(hwmgr->smumgr, + result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_EnableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), "Failed to enable DPM DIDT.", return result); @@ -809,7 +809,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) "Post DIDT enable clock gating failed.", return result); if (hwmgr->chip_id == CHIP_POLARIS11) { - result = smum_send_msg_to_smc(hwmgr->smumgr, + result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), "Failed to disable DPM DIDT.", return result); @@ -827,7 +827,7 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_CAC)) { int smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_EnableCac)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable CAC in SMC.", result = -1); @@ -843,7 +843,7 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) int result = 0; if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + int smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableCac)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable CAC in SMC.", result = -1); @@ -859,7 +859,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PkgPwrSetLimit, n); return 0; } @@ -867,7 +867,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) static int 
smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); } @@ -888,7 +888,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_TDCLimitEnable)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable TDCLimit in SMC.", result = -1;); @@ -898,7 +898,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) } if (data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable PkgPwrTracking in SMC.", result = -1;); @@ -927,7 +927,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_TDCLimitDisable)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable TDCLimit in SMC.", @@ -936,7 +936,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableDTE)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable DTE in SMC.", @@ -945,7 +945,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + smc_result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); PP_ASSERT_WITH_CODE((smc_result == 0), "Failed to disable PkgPwrTracking in SMC.", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 10e12b2b43858..d7aa643cdb517 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -152,7 +152,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) { cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM)) hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, @@ -165,12 +165,12 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) } else { cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); + result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl); } if (!result && hwmgr->thermal_controller. advanceFanControlParameters.ucTargetTemperature) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, hwmgr->thermal_controller. 
advanceFanControlParameters.ucTargetTemperature); @@ -183,7 +183,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) { hwmgr->fan_ctrl_enabled = false; - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); + return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl); } /** @@ -371,7 +371,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to enable internal thermal interrupts */ - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable); } /** @@ -389,7 +389,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr) CG_THERMAL_INT, THERM_INT_MASK, alert); /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable); } /** diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 439cb371c56ab..a59d282797f5b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -425,8 +425,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion); - vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version)); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); + vega10_read_arg_from_smc(hwmgr, &(data->smu_version)); /* ACG firmware has major version 5 */ if ((data->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; @@ -922,7 +922,7 @@ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr) { uint32_t features_enabled; - if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) { + if (!vega10_get_smc_features(hwmgr, &features_enabled)) { if (features_enabled & SMC_DPM_FEATURES) return true; } @@ -2272,21 +2272,21 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) uint32_t agc_btc_response; if (data->smu_features[GNLD_ACG].supported) { - if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); - vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc); + vega10_read_arg_from_smc(hwmgr, &agc_btc_response); if (1 == agc_btc_response) { if (1 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop); else if (2 == data->acg_loop_state) - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop); - if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop); + if (0 == vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = true; } else { @@ -2305,7 +2305,7 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_ACG].supported && 
data->smu_features[GNLD_ACG].enabled) - if (!vega10_enable_smc_features(hwmgr->smumgr, false, + if (!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_ACG].smu_feature_bitmap)) data->smu_features[GNLD_ACG].enabled = false; @@ -2355,14 +2355,14 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) if (data->smu_features[GNLD_AVFS].supported) { if (enable) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_AVFS].smu_feature_bitmap), "[avfs_control] Attempt to Enable AVFS feature Failed!", return -1); data->smu_features[GNLD_AVFS].enabled = true; } else { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_AVFS].smu_feature_id), "[avfs_control] Attempt to Disable AVFS feature Failed!", @@ -2385,11 +2385,11 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32); - vega10_read_arg_from_smc(hwmgr->smumgr, &top32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); + vega10_read_arg_from_smc(hwmgr, &top32); - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32); - vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); + vega10_read_arg_from_smc(hwmgr, &bottom32); serial_number = ((uint64_t)bottom32 << 32) | top32; @@ -2403,7 +2403,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) avfs_fuse_table->VFT2_b = fuse.VFT2_b; avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1; avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2; - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)avfs_fuse_table, AVFSFUSETABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload FuseOVerride!", @@ -2542,14 +2542,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; if (0 != boot_up_values.usVddc) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFloorSocVoltage, (boot_up_values.usVddc * 4)); data->vbios_boot_state.bsoc_vddc_lock = true; } else { data->vbios_boot_state.bsoc_vddc_lock = false; } - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); } @@ -2575,7 +2575,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) vega10_populate_and_upload_avfs_fuse_override(hwmgr); - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)pp_table, PPTABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); @@ -2598,7 +2598,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr) pr_info("THERMAL Feature Already enabled!"); PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_THERMAL].smu_feature_bitmap), "Enable THERMAL Feature Failed!", @@ -2618,7 +2618,7 @@ static int vega10_disable_thermal_protection(struct 
pp_hwmgr *hwmgr) pr_info("THERMAL Feature Already disabled!"); PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_THERMAL].smu_feature_bitmap), "disable THERMAL Feature Failed!", @@ -2637,7 +2637,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) { if (data->smu_features[GNLD_VR0HOT].supported) { PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_VR0HOT].smu_feature_bitmap), "Attempt to Enable VR0 Hot feature Failed!", @@ -2646,7 +2646,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) } else { if (data->smu_features[GNLD_VR1HOT].supported) { PP_ASSERT_WITH_CODE( - !vega10_enable_smc_features(hwmgr->smumgr, + !vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_VR1HOT].smu_feature_bitmap), "Attempt to Enable VR0 Hot feature Failed!", @@ -2664,7 +2664,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.ulv_support) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ULV].smu_feature_bitmap), "Enable ULV Feature Failed!", return -1); @@ -2680,7 +2680,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.ulv_support) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_ULV].smu_feature_bitmap), "disable ULV Feature Failed!", return -EINVAL); @@ -2696,7 +2696,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DS_GFXCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), "Attempt to Enable DS_GFXCLK Feature Failed!", return -EINVAL); @@ -2704,7 +2704,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_SOCCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), "Attempt to Enable DS_SOCCLK Feature Failed!", return -EINVAL); @@ -2712,7 +2712,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_LCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap), "Attempt to Enable DS_LCLK Feature Failed!", return -EINVAL); @@ -2720,7 +2720,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_DCEFCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap), "Attempt to Enable DS_DCEFCLK Feature Failed!", return -EINVAL); @@ -2736,7 +2736,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); if 
(data->smu_features[GNLD_DS_GFXCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), "Attempt to disable DS_GFXCLK Feature Failed!", return -EINVAL); @@ -2744,7 +2744,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_SOCCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), "Attempt to disable DS_ Feature Failed!", return -EINVAL); @@ -2752,7 +2752,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_LCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap), "Attempt to disable DS_LCLK Feature Failed!", return -EINVAL); @@ -2760,7 +2760,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) } if (data->smu_features[GNLD_DS_DCEFCLK].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap), "Attempt to disable DS_DCEFCLK Feature Failed!", return -EINVAL); @@ -2778,7 +2778,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap), "Attempt to disable LED DPM feature failed!", return -EINVAL); data->smu_features[GNLD_LED_DISPLAY].enabled = false; @@ -2796,7 +2796,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask); + vega10_enable_smc_features(hwmgr, false, feature_mask); return 0; } @@ -2826,7 +2826,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - if (vega10_enable_smc_features(hwmgr->smumgr, + if (vega10_enable_smc_features(hwmgr, true, feature_mask)) { for (i = 0; i < GNLD_DPM_MAX; i++) { if (data->smu_features[i].smu_feature_bitmap & @@ -2836,21 +2836,21 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap), "Attempt to Enable LED DPM feature Failed!", return -EINVAL); data->smu_features[GNLD_LED_DISPLAY].enabled = true; } if (data->vbios_boot_state.bsoc_vddc_lock) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFloorSocVoltage, 0); data->vbios_boot_state.bsoc_vddc_lock = false; } if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) { if (data->smu_features[GNLD_ACDC].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_ACDC].smu_feature_bitmap), "Attempt to Enable DS_GFXCLK Feature Failed!", return -1); @@ -2867,13 +2867,13 @@ static int 
vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) (struct vega10_hwmgr *)(hwmgr->backend); int tmp_result, result = 0; - tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to configure telemetry!", return tmp_result); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0); tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1; @@ -3674,7 +3674,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.gfx_boot_level != data->dpm_table.gfx_table.dpm_state.soft_min_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, data->smc_state_table.gfx_boot_level), "Failed to set soft min sclk index!", @@ -3690,14 +3690,14 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinSocclkByIndex, socclk_idx), "Failed to set soft min uclk index!", return -EINVAL); } else { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, data->smc_state_table.mem_boot_level), "Failed to set soft min uclk index!", @@ -3722,7 +3722,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.gfx_max_level != data->dpm_table.gfx_table.dpm_state.soft_max_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMaxGfxclkByIndex, data->smc_state_table.gfx_max_level), "Failed to set soft max sclk index!", @@ -3736,7 +3736,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) if (data->smc_state_table.mem_max_level != data->dpm_table.mem_table.dpm_state.soft_max_level) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMaxUclkByIndex, data->smc_state_table.mem_max_level), "Failed to set soft max mclk index!", @@ -3795,7 +3795,7 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DPM_VCE].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, enable, data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap), "Attempt to Enable/Disable DPM VCE Failed!", @@ -3825,7 +3825,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) cpu_to_le32(low_sclk_interrupt_threshold); /* This message will also enable SmcToHost Interrupt */ - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetLowGfxclkInterruptThreshold, (uint32_t)low_sclk_interrupt_threshold); } @@ -3861,7 +3861,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, "Failed to update SCLK threshold!", result = tmp_result); - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)pp_table, PPTABLE); PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); @@ -3921,12 +3921,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, { uint32_t value; - 
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr), "Failed to get current package power!", return -EINVAL); - vega10_read_arg_from_smc(hwmgr->smumgr, &value); + vega10_read_arg_from_smc(hwmgr, &value); /* power value is an integer */ query->average_gpu_power = value << 8; @@ -3943,25 +3943,25 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx); + vega10_read_arg_from_smc(hwmgr, &sclk_idx); *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; *size = 4; } break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx); + vega10_read_arg_from_smc(hwmgr, &mclk_idx); *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; *size = 4; } break; case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0); + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); if (!ret) { - vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent); + vega10_read_arg_from_smc(hwmgr, &activity_percent); *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; *size = 4; } @@ -3996,7 +3996,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_disp) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, has_disp ? 
0 : 1); } @@ -4031,7 +4031,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (!result) { clk_request = (clk_freq << 16) | clk_select; - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_RequestDisplayClockByFreq, clk_request); } @@ -4101,7 +4101,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk, + hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, min_clocks.dcefClockInSR /100), "Attempt to set divider for DCEFCLK Failed!",); } else { @@ -4113,7 +4113,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (min_clocks.memoryClock != 0) { idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); data->dpm_table.mem_table.dpm_state.soft_min_level= idx; } @@ -4596,11 +4596,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.sclk_dpm_key_disabled) break; - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex), "Attempt to get current sclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read sclk index Failed!", return -1); @@ -4614,11 +4614,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, if (data->registry_data.mclk_dpm_key_disabled) break; - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex), "Attempt to get current mclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read mclk index Failed!", return -1); @@ -4629,11 +4629,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, (i == now) ? 
"*" : ""); break; case PP_PCIE: - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex), "Attempt to get current mclk index Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, &now), "Attempt to read mclk index Failed!", return -1); @@ -4661,7 +4661,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { - result = vega10_copy_table_to_smc(hwmgr->smumgr, + result = vega10_copy_table_to_smc(hwmgr, (uint8_t *)wm_table, WMTABLE); PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL); data->water_marks_bitmap |= WaterMarksLoaded; @@ -4670,7 +4670,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if (data->water_marks_bitmap & WaterMarksLoaded) { cgs_get_active_displays_info(hwmgr->device, &info); num_turned_on_displays = info.display_count; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, num_turned_on_displays); } @@ -4683,7 +4683,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) (struct vega10_hwmgr *)(hwmgr->backend); if (data->smu_features[GNLD_DPM_UVD].supported) { - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, enable, data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap), "Attempt to Enable/Disable DPM UVD Failed!", @@ -4870,7 +4870,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr, if (!data->registry_data.sclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( !smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, sclk_idx), "Failed to set soft min sclk index!", @@ -4881,7 +4881,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr, if (!data->registry_data.mclk_dpm_key_disabled) PP_ASSERT_WITH_CODE( !smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, + hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, mclk_idx), "Failed to set soft min mclk index!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 0090ff1bf416d..d2f695692f772 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -926,7 +926,7 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) if (enable) { /* For Vega10, SMC does not support any mask yet. 
*/ - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!"); } } @@ -1243,7 +1243,7 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr) } if (0 == result) { - PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result); data->smu_features[GNLD_DIDT].enabled = true; } @@ -1290,7 +1290,7 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr) } if (0 == result) { - PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result); data->smu_features[GNLD_DIDT].enabled = false; } @@ -1344,7 +1344,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) (struct vega10_hwmgr *)(hwmgr->backend); if (data->registry_data.enable_pkg_pwr_tracking_feature) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetPptLimit, n); return 0; @@ -1363,13 +1363,13 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_PPT].smu_feature_bitmap), "Attempt to enable PPT feature Failed!", data->smu_features[GNLD_PPT].supported = false); if (data->smu_features[GNLD_TDC].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_TDC].smu_feature_bitmap), "Attempt to enable PPT feature Failed!", data->smu_features[GNLD_TDC].supported = false); @@ -1390,13 +1390,13 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr) if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_PPT].smu_feature_bitmap), "Attempt to disable PPT feature Failed!", data->smu_features[GNLD_PPT].supported = false); if (data->smu_features[GNLD_TDC].supported) - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_TDC].smu_feature_bitmap), "Attempt to disable PPT feature Failed!", data->smu_features[GNLD_TDC].supported = false); @@ -1408,7 +1408,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr) static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, uint32_t adjust_percent) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_OverDriveSetPercentage, adjust_percent); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c 
index ce873e40a8fdb..1feefac49ea9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -31,11 +31,11 @@ static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) { - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm), "Attempt to get current RPM from SMC Failed!", return -1); - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr, current_rpm), "Attempt to read current RPM from SMC Failed!", return -1); @@ -199,7 +199,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FAN_CONTROL].supported) { PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( - hwmgr->smumgr, true, + hwmgr, true, data->smu_features[GNLD_FAN_CONTROL]. smu_feature_bitmap), "Attempt to Enable FAN CONTROL feature Failed!", @@ -216,7 +216,7 @@ static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FAN_CONTROL].supported) { PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( - hwmgr->smumgr, false, + hwmgr, false, data->smu_features[GNLD_FAN_CONTROL]. smu_feature_bitmap), "Attempt to Enable FAN CONTROL feature Failed!", @@ -458,7 +458,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_FW_CTF].enabled) printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n"); - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), "Attempt to Enable FW CTF feature Failed!", @@ -490,7 +490,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) printk("[Thermal_EnableAlert] FW CTF Already disabled!\n"); - PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), "Attempt to disable FW CTF feature Failed!", @@ -546,7 +546,7 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) table->FanTargetTemperature = hwmgr->thermal_controller. advanceFanControlParameters.usTMax; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanTemperatureTarget, (uint32_t)table->FanTargetTemperature); @@ -575,7 +575,7 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) table->FanStartTemp = hwmgr->thermal_controller. 
advanceFanControlParameters.usZeroRPMStartTemperature; - ret = vega10_copy_table_to_smc(hwmgr->smumgr, + ret = vega10_copy_table_to_smc(hwmgr, (uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE); if (ret) pr_info("Failed to update Fan Control Table in PPTable!"); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index f807dd639aede..58581e1bbf501 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -103,21 +103,21 @@ enum SMU_MAC_DEFINITION { struct pp_smumgr_func { - int (*smu_init)(struct pp_smumgr *smumgr); - int (*smu_fini)(struct pp_smumgr *smumgr); - int (*start_smu)(struct pp_smumgr *smumgr); - int (*check_fw_load_finish)(struct pp_smumgr *smumgr, + int (*smu_init)(struct pp_hwmgr *hwmgr); + int (*smu_fini)(struct pp_hwmgr *hwmgr); + int (*start_smu)(struct pp_hwmgr *hwmgr); + int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, uint32_t firmware); - int (*request_smu_load_fw)(struct pp_smumgr *smumgr); - int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr, + int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); + int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, uint32_t firmware); - int (*get_argument)(struct pp_smumgr *smumgr); - int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg); - int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr, + int (*get_argument)(struct pp_hwmgr *hwmgr); + int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); + int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); - int (*download_pptable_settings)(struct pp_smumgr *smumgr, + int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, void **table); - int (*upload_pptable_settings)(struct pp_smumgr *smumgr); + int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); int (*process_firmware_header)(struct pp_hwmgr *hwmgr); int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); @@ -132,7 +132,7 @@ struct pp_smumgr_func { bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); - bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr); + bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); }; struct pp_smumgr { @@ -148,30 +148,30 @@ struct pp_smumgr { extern int smum_early_init(struct pp_instance *handle); -extern int smum_get_argument(struct pp_smumgr *smumgr); +extern int smum_get_argument(struct pp_hwmgr *hwmgr); -extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table); +extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); -extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr); +extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); -extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); +extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); -extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -extern int smum_wait_on_register(struct pp_smumgr *smumgr, +extern int smum_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, +extern int smum_wait_for_register_unequal(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t 
mask); -extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, +extern int smum_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask); extern void smum_wait_for_indirect_register_unequal( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask); @@ -181,7 +181,7 @@ extern int smu_allocate_memory(void *device, uint32_t size, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); -extern int vega10_smum_init(struct pp_smumgr *smumgr); +extern int vega10_smum_init(struct pp_hwmgr *hwmgr); extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); @@ -193,44 +193,44 @@ extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); -extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, +extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member); -extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value); +extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value); extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); -extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); +extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK -#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ +#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ port, index, value, mask) \ - smum_wait_on_indirect_register(smumgr, \ + smum_wait_on_indirect_register(hwmgr, \ mm##port##_INDEX, index, value, mask) -#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) +#define SMUM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ +#define SMUM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field) ) -#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ +#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ index, value, mask) \ - smum_wait_for_register_unequal(smumgr, \ + smum_wait_for_register_unequal(hwmgr, \ index, value, mask) -#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \ - SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ +#define SMUM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ + SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ mm##reg, value, mask) -#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \ - SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \ +#define SMUM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ + SMUM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) @@ -250,22 +250,22 @@ extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); 
SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ port, index, value, mask) \ - smum_wait_on_indirect_register(smumgr, \ + smum_wait_on_indirect_register(hwmgr, \ mm##port##_INDEX_0, index, value, mask) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(smumgr, \ + smum_wait_for_indirect_register_unequal(hwmgr, \ mm##port##_INDEX_0, index, value, mask) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) +#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) /*Operations on named fields.*/ @@ -290,25 +290,25 @@ extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); reg, field, fieldval)) -#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ +#define SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) -#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ +#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) -#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(smumgr, \ +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + smum_wait_for_indirect_register_unequal(hwmgr, \ mm##port##_INDEX, index, value, mask) -#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) +#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ +#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field) ) #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index b7a2391907852..2710a6fa3df03 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -94,7 +94,7 @@ static const struct ci_pt_defaults 
defaults_saturn_xt = { }; -static int ci_set_smc_sram_address(struct pp_smumgr *smumgr, +static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit) { if ((0 != (3 & smc_addr)) @@ -103,12 +103,12 @@ static int ci_set_smc_sram_address(struct pp_smumgr *smumgr, return -EINVAL; } - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); return 0; } -static int ci_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { int result; @@ -129,12 +129,12 @@ static int ci_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_add /* Bytes are written into the SMC address space with the MSB first. */ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - result = ci_set_smc_sram_address(smumgr, addr, limit); + result = ci_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; @@ -145,13 +145,13 @@ static int ci_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_add data = 0; - result = ci_set_smc_sram_address(smumgr, addr, limit); + result = ci_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); extra_shift = 8 * (4 - byte_count); @@ -165,61 +165,61 @@ static int ci_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_add data |= (original_data & ~((~0UL) << extra_shift)); - result = ci_set_smc_sram_address(smumgr, addr, limit); + result = ci_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); } return 0; } -static int ci_program_jump_on_start(struct pp_smumgr *smumgr) +static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr) { static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - ci_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); return 0; } -bool ci_is_smc_ram_running(struct pp_smumgr *smumgr) +bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, + && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } -static int ci_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) { int result; - result = ci_set_smc_sram_address(smumgr, smc_addr, limit); + result = ci_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); return 0; } -int 
ci_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!ci_is_smc_ram_running(smumgr)) + if (!ci_is_smc_ram_running(hwmgr)) return -EINVAL; - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d\n", msg, ret); @@ -227,11 +227,11 @@ int ci_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) return 0; } -int ci_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - return ci_send_msg_to_smc(smumgr, msg); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); + return ci_send_msg_to_smc(hwmgr, msg); } static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) @@ -510,7 +510,7 @@ int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) data->dpm_level_enable_mask.sclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - result = ci_copy_bytes_to_smc(hwmgr->smumgr, array, + result = ci_copy_bytes_to_smc(hwmgr, array, (u8 *)levels, array_size, SMC_RAM_END); @@ -553,7 +553,7 @@ static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (ci_read_smc_sram_dword(hwmgr->smumgr, + if (ci_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -686,7 +686,7 @@ static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (ci_read_smc_sram_dword(hwmgr->smumgr, + if (ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) { @@ -713,7 +713,7 @@ static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (ret) return ret; - ret = ci_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END); } @@ -1343,7 +1343,7 @@ int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr) data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - result = ci_copy_bytes_to_smc(hwmgr->smumgr, + result = ci_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1705,7 +1705,7 @@ static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (0 == result) { result = ci_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU7_Discrete_MCArbDramTimingTable), @@ -1756,10 +1756,10 @@ static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, return result; } -static int ci_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int ci_populate_mc_reg_address(struct 
pp_hwmgr *hwmgr, SMU7_Discrete_MCRegisters *mc_reg_table) { - const struct ci_smumgr *smu_data = (struct ci_smumgr *)smumgr->backend; + const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smumgr->backend; uint32_t i, j; @@ -1796,12 +1796,12 @@ static void ci_convert_mc_registers( } static int ci_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU7_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1831,7 +1831,7 @@ static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = ci_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -1845,8 +1845,7 @@ static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1864,7 +1863,7 @@ static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]); - return ci_copy_bytes_to_smc(hwmgr->smumgr, address, + return ci_copy_bytes_to_smc(hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, SMC_RAM_END); @@ -1873,11 +1872,10 @@ static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters)); - result = ci_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for the MC register addresses!", return result;); @@ -1885,7 +1883,7 @@ static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for driver state!", return result;); - return ci_copy_bytes_to_smc(smumgr, smu_data->mc_reg_table_start, + return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END); } @@ -1930,17 +1928,17 @@ static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, return 0; } -static int ci_start_smc(struct pp_smumgr *smumgr) +static int ci_start_smc(struct pp_hwmgr *hwmgr) { /* set smc instruct start point at 0x0 */ - ci_program_jump_on_start(smumgr); + ci_program_jump_on_start(hwmgr); /* enable smc clock */ - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, 
SMC_SYSCON_RESET_CNTL, rst_reg, 0); + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; @@ -2105,7 +2103,7 @@ int ci_init_smc_table(struct pp_hwmgr *hwmgr) table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = ci_copy_bytes_to_smc(hwmgr->smumgr, smu_data->dpm_table_start + + result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController), @@ -2122,7 +2120,7 @@ int ci_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to populate PM fuses to SMC memory!", return result); - ci_start_smc(hwmgr->smumgr); + ci_start_smc(hwmgr); return 0; } @@ -2197,7 +2195,7 @@ int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = ci_copy_bytes_to_smc(hwmgr->smumgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); return 0; } @@ -2233,7 +2231,7 @@ int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = ci_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), @@ -2303,7 +2301,7 @@ uint32_t ci_get_mac_definition(uint32_t value) return 0; } -static int ci_load_smc_ucode(struct pp_smumgr *smumgr) +static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) { uint32_t byte_count, start_addr; uint8_t *src; @@ -2311,9 +2309,9 @@ static int ci_load_smc_ucode(struct pp_smumgr *smumgr) struct cgs_firmware_info info = {0}; - cgs_get_firmware_info(smumgr->device, CGS_UCODE_ID_SMU, &info); + cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); - smumgr->is_kicker = info.is_kicker; + hwmgr->smumgr->is_kicker = info.is_kicker; byte_count = info.image_size; src = (uint8_t *)info.kptr; start_addr = info.ucode_start_address; @@ -2323,15 +2321,15 @@ static int ci_load_smc_ucode(struct pp_smumgr *smumgr) return -EINVAL; } - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); for (; byte_count >= 4; byte_count -= 4) { data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; } - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); if (0 != byte_count) { pr_err("SMC size must be dividable by 4\n"); @@ -2343,18 +2341,18 @@ static int ci_load_smc_ucode(struct pp_smumgr *smumgr) static int ci_upload_firmware(struct pp_hwmgr *hwmgr) { - if (ci_is_smc_ram_running(hwmgr->smumgr)) { + if (ci_is_smc_ram_running(hwmgr)) { pr_info("smc is running, no need to 
load smc firmware\n"); return 0; } - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr->smumgr, SMC_IND, RCU_UC_EVENTS, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - return ci_load_smc_ucode(hwmgr->smumgr); + return ci_load_smc_ucode(hwmgr); } int ci_process_firmware_header(struct pp_hwmgr *hwmgr) @@ -2369,7 +2367,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) if (ci_upload_firmware(hwmgr)) return -EINVAL; - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2379,7 +2377,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2391,7 +2389,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2399,7 +2397,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) if (0 == result) ci_data->mc_reg_table_start = tmp; - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2409,7 +2407,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2419,7 +2417,7 @@ int ci_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = ci_read_smc_sram_dword(hwmgr->smumgr, + result = ci_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU7_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2726,7 +2724,7 @@ int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) { - return ci_is_smc_ram_running(hwmgr->smumgr); + return ci_is_smc_ram_running(hwmgr); } int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, @@ -2750,6 +2748,6 @@ int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownH = request->down_hyst; } - return ci_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h index 05b36b8009425..cc4176d2d25ff 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h @@ -30,9 +30,9 @@ struct pp_smumgr; struct pp_hwmgr; struct amd_pp_profile; -int ci_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int ci_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); +int 
ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr); int ci_init_smc_table(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 62f6bdae66127..d2e24e3a963d7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -30,7 +30,7 @@ #include "cgs_common.h" #include "ci_smc.h" -static int ci_smu_init(struct pp_smumgr *smumgr) +static int ci_smu_init(struct pp_hwmgr *hwmgr) { int i; struct ci_smumgr *ci_priv = NULL; @@ -43,20 +43,20 @@ static int ci_smu_init(struct pp_smumgr *smumgr) for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) ci_priv->activity_target[i] = 30; - smumgr->backend = ci_priv; + hwmgr->smumgr->backend = ci_priv; return 0; } -static int ci_smu_fini(struct pp_smumgr *smumgr) +static int ci_smu_fini(struct pp_hwmgr *hwmgr) { - kfree(smumgr->backend); - smumgr->backend = NULL; - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } -static int ci_start_smu(struct pp_smumgr *smumgr) +static int ci_start_smu(struct pp_hwmgr *hwmgr) { return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 652aaa43e95cc..a6fa0e86a8fd3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = { CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, }; -static int cz_smum_get_argument(struct pp_smumgr *smumgr) +static int cz_smum_get_argument(struct pp_hwmgr *hwmgr) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - return cgs_read_register(smumgr->device, + return cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); } -static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, - uint16_t msg) +static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + result = SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); return result; } - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); return 0; } /* Send a message to the SMC, and wait for its response.*/ -static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - result = cz_send_msg_to_smc_async(smumgr, msg); + result = cz_send_msg_to_smc_async(hwmgr, msg); if (result != 0) return result; - return SMUM_WAIT_FIELD_UNEQUAL(smumgr, + return SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); } -static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, +static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t limit) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == 
NULL || hwmgr->device == NULL) return -EINVAL; if (0 != (3 & smc_address)) { @@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, return -EINVAL; } - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0, + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); return 0; } -static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr, +static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t value, uint32_t limit) { int result; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = cz_set_smc_sram_address(smumgr, smc_address, limit); + result = cz_set_smc_sram_address(hwmgr, smc_address, limit); if (!result) - cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value); return result; } -static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc(smumgr, msg); + return cz_send_msg_to_smc(hwmgr, msg); } -static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, +static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t firmware) { int i; @@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); - for (i = 0; i < smumgr->usec_timeout; i++) { + for (i = 0; i < hwmgr->usec_timeout; i++) { if (firmware == - (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware)) + (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware)) break; udelay(1); } - if (i >= smumgr->usec_timeout) { + if (i >= hwmgr->usec_timeout) { pr_err("SMU check loaded firmware failed.\n"); return -EINVAL; } @@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, return 0; } -static int cz_load_mec_firmware(struct pp_smumgr *smumgr) +static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) { uint32_t reg_data; uint32_t tmp; @@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr) struct cgs_firmware_info info = {0}; struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; - ret = cgs_get_firmware_info(smumgr->device, + cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); if (ret) return -EINVAL; /* Disable MEC parsing/prefetching */ - tmp = cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_MEC_CNTL); tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp); - tmp = 
cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL); tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); reg_data = smu_lower_32_bits(info.mc_addr) & SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data); reg_data = smu_upper_32_bits(info.mc_addr) & SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data); return 0; } -static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, +static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, enum cz_scratch_entry firmware_enum) { uint8_t ret = 0; @@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_SDMA0; break; case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_SDMA0; else ret = UCODE_ID_SDMA1; @@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_CP_MEC_JT1; break; case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_CP_MEC_JT1; else ret = UCODE_ID_CP_MEC_JT2; @@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) } static int cz_smu_populate_single_scratch_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, uint8_t type, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = type; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->scratch_buffer_length; i++) @@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task( } static int cz_smu_populate_single_ucode_load_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? 
END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->driver_buffer_length; i++) @@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task( return 0; } -static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_SAVE, true); return 0; } -static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) +static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) { int i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) @@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_SAVE, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_SAVE, true); @@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) } -static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id == CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); else - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); /* populate scratch */ - 
cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_LOAD, true); return 0; } -static int cz_smu_construct_toc_for_power_profiling( - struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); return 0; } -static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; cz_smu->toc_entry_used_count = 0; - cz_smu_initialize_toc_empty_job_list(smumgr); - 
cz_smu_construct_toc_for_rlc_aram_save(smumgr); - cz_smu_construct_toc_for_vddgfx_enter(smumgr); - cz_smu_construct_toc_for_vddgfx_exit(smumgr); - cz_smu_construct_toc_for_power_profiling(smumgr); - cz_smu_construct_toc_for_bootup(smumgr); - cz_smu_construct_toc_for_clock_table(smumgr); + cz_smu_initialize_toc_empty_job_list(hwmgr); + cz_smu_construct_toc_for_rlc_aram_save(hwmgr); + cz_smu_construct_toc_for_vddgfx_enter(hwmgr); + cz_smu_construct_toc_for_vddgfx_exit(hwmgr); + cz_smu_construct_toc_for_power_profiling(hwmgr); + cz_smu_construct_toc_for_bootup(hwmgr); + cz_smu_construct_toc_for_clock_table(hwmgr); return 0; } -static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) +static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; uint32_t firmware_type; uint32_t i; int ret; @@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { - firmware_type = cz_translate_firmware_enum_to_arg(smumgr, + firmware_type = cz_translate_firmware_enum_to_arg(hwmgr, firmware_list[i]); ucode_id = cz_convert_fw_type_to_cgs(firmware_type); - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, ucode_id, &info); if (ret == 0) { @@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) } static int cz_smu_populate_single_scratch_entry( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry scratch_type, uint32_t ulsize_byte, struct cz_buffer_entry *entry) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; long long mc_addr = ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) | cz_smu->smu_buffer.mc_addr_low; @@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry( return 0; } -static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) +static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr; - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); return 0; } -static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) +static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -651,63 +649,63 @@ static 
int cz_upload_pptable_settings(struct pp_smumgr *smumgr) break; } - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); return 0; } -static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) +static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smumgr->backend); uint32_t smc_address; - if (!smumgr->reload_fw) { + if (!hwmgr->smumgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } - cz_smu_populate_firmware_entries(smumgr); + cz_smu_populate_firmware_entries(hwmgr); - cz_smu_construct_toc(smumgr); + cz_smu_construct_toc(hwmgr); smc_address = SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); + cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrHi, cz_smu->toc_buffer.mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrLo, cz_smu->toc_buffer.mc_addr_low); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_aram); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_power_profiling_index); - return cz_send_msg_to_smc_with_parameter(smumgr, + return cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_initialize_index); } -static int cz_start_smu(struct pp_smumgr *smumgr) +static int cz_start_smu(struct pp_hwmgr *hwmgr) { int ret = 0; uint32_t fw_to_check = 0; @@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr) UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - ret = cz_request_smu_load_fw(smumgr); + ret = cz_request_smu_load_fw(hwmgr); if (ret) pr_err("SMU firmware load failed\n"); - cz_check_fw_load_finish(smumgr, fw_to_check); + cz_check_fw_load_finish(hwmgr, fw_to_check); - ret = cz_load_mec_firmware(smumgr); + ret = cz_load_mec_firmware(hwmgr); if (ret) pr_err("Mec Firmware load failed\n"); return ret; } -static int cz_smu_init(struct pp_smumgr *smumgr) +static int cz_smu_init(struct pp_hwmgr *hwmgr) { uint64_t mc_addr = 0; int ret = 0; @@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) if (cz_smu == NULL) return -ENOMEM; - smumgr->backend = cz_smu; + hwmgr->smumgr->backend = cz_smu; cz_smu->toc_buffer.data_size = 4096; cz_smu->smu_buffer.data_size = @@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) ALIGN(sizeof(struct 
SMU8_MultimediaPowerLogData), 32) + ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->toc_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->smu_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, UCODE_ID_RLC_SCRATCH_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { pr_err("Error when Populate Firmware Entry.\n"); return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, sizeof(struct SMU8_MultimediaPowerLogData), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, sizeof(struct SMU8_Fusion_ClkTable), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_fini(struct pp_smumgr *smumgr) +static int cz_smu_fini(struct pp_hwmgr *hwmgr) { struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; + cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; if (cz_smu) { - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->toc_buffer.handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->smu_buffer.handle); kfree(cz_smu); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 9f612dd395ac0..843ed7a665f65 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -338,7 +338,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + 
offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -425,7 +425,7 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -473,7 +473,7 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -848,7 +848,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1032,7 +1032,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1359,7 +1359,7 @@ static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU73_Discrete_MCArbDramTimingTable), @@ -1683,9 +1683,9 @@ static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, return 0; } -static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) +static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); uint32_t tmp; int result; @@ -1697,7 +1697,7 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. 
*/ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1706,7 +1706,7 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1771,7 +1771,7 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) } } if (mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LedConfig, mask); return 0; @@ -1974,7 +1974,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1983,7 +1983,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = fiji_init_arb_table_index(hwmgr->smumgr); + result = fiji_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -2093,20 +2093,20 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. 
advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2122,13 +2122,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) return 0; - ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); if (!ret) /* If this param is not changed, this function could fire unnecessarily */ @@ -2168,7 +2167,7 @@ int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2269,7 +2268,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2301,7 +2300,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2328,7 +2327,7 @@ static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2367,7 +2366,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2377,7 +2376,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2389,7 +2388,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2397,7 +2396,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2407,7 +2406,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = 
smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2417,7 +2416,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2482,6 +2481,6 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 6ae948fc524f7..dfdcff54947ad 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -58,122 +58,122 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } }; -static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for ROM firmware to initialize interrupt hendler */ - /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND, SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, 
SMC_RESP_0, SMC_RESP, 0); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) +static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = PwrVirusTable; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -192,13 +192,13 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) +static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); result = -EINVAL; @@ -206,23 +206,23 @@ static int fiji_start_avfs_btc(struct 
pp_smumgr *smumgr) } /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); /* clear reset */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); return result; } -static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { int32_t vr_config; uint32_t table_start; uint32_t level_addr, vr_config_addr; uint32_t level_size = sizeof(avfs_graphics_level); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &table_start, 0x40000), @@ -237,7 +237,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_addr = table_start + offsetof(SMU73_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr, (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "vr_config value over to SMC", @@ -245,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr, (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", return -1;); @@ -253,9 +253,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } -static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) +static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -265,17 +265,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) if (!smu_started) break; smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not setup " "Pwr Virus for AVFS ", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr), "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. 
AVFS Disabled", return -EINVAL;); @@ -293,64 +293,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) return 0; } -static int fiji_start_smu(struct pp_smumgr *smumgr) +static int fiji_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smumgr->backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(smumgr) - || cgs_is_virtualization_enabled(smumgr->device))) { - fiji_avfs_event_mgr(smumgr, false); + if (!(smu7_is_smc_ram_running(hwmgr) + || cgs_is_virtualization_enabled(hwmgr->device))) { + fiji_avfs_event_mgr(hwmgr, false); /* Check if SMU is running in protected mode */ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = fiji_start_smu_in_non_protection_mode(smumgr); + result = fiji_start_smu_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = fiji_start_smu_in_protection_mode(smumgr); + result = fiji_start_smu_in_protection_mode(hwmgr); if (result) return result; } - fiji_avfs_event_mgr(smumgr, true); + fiji_avfs_event_mgr(hwmgr, true); } /* To initialize all clock gating before RLC loaded and running.*/ - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &(priv->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse = 0; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; - if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { if (efuse) return true; @@ -365,7 +365,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) * @param smc_addr the address in the SMC RAM to access. * @param value to write to the SMC SRAM. 
*/ -static int fiji_smu_init(struct pp_smumgr *smumgr) +static int fiji_smu_init(struct pp_hwmgr *hwmgr) { int i; struct fiji_smumgr *fiji_priv = NULL; @@ -375,9 +375,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) if (fiji_priv == NULL) return -ENOMEM; - smumgr->backend = fiji_priv; + hwmgr->smumgr->backend = fiji_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 1ed3214a965f2..89d5a272e2368 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -163,7 +163,7 @@ static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offs const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -264,7 +264,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -318,7 +318,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -881,7 +881,7 @@ int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1246,7 +1246,7 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1507,7 +1507,7 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (0 == result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU71_Discrete_MCArbDramTimingTable), @@ -1561,10 +1561,10 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, return result; } -static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) { - const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend; + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smumgr->backend; uint32_t i, j; @@ -1601,13 +1601,12 @@ static 
void iceland_convert_mc_registers( } } -static int iceland_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, +static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU71_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1637,7 +1636,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = iceland_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -1651,8 +1650,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1671,7 +1669,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); - return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, + return smu7_copy_bytes_to_smc(hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, SMC_RAM_END); @@ -1680,11 +1678,10 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); - result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for the MC register addresses!", return result;); @@ -1692,7 +1689,7 @@ static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for driver state!", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END); } @@ -1944,7 +1941,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController), @@ -1954,7 +1951,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to upload dpm data to SMC memory!", return 
result;); /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.ulv_setting_starts, (uint8_t *)&(smu_data->ulv_setting), sizeof(SMU71_Discrete_Ulv), @@ -2053,7 +2050,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) /* fan_table.FanControl_GL_Flag = 1; */ - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); return 0; } @@ -2090,7 +2087,7 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2177,7 +2174,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2188,7 +2185,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2201,7 +2198,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2210,7 +2207,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) smu7_data->mc_reg_table_start = tmp; } - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2221,7 +2218,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2233,7 +2230,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2244,7 +2241,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, UlvSettings), &tmp, SMC_RAM_END); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 0bf2def3b6592..d665272993083 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -39,55 +39,55 @@ #define ICELAND_SMC_SIZE 0x20000 -static int iceland_start_smc(struct pp_smumgr *smumgr) +static int iceland_start_smc(struct pp_hwmgr *hwmgr) 
{ - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); return 0; } -static void iceland_reset_smc(struct pp_smumgr *smumgr) +static void iceland_reset_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); } -static void iceland_stop_smc_clock(struct pp_smumgr *smumgr) +static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); } -static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); } -static int iceland_smu_start_smc(struct pp_smumgr *smumgr) +static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr) { /* set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* enable smc clock */ - iceland_start_smc_clock(smumgr); + iceland_start_smc_clock(hwmgr); /* de-assert reset */ - iceland_start_smc(smumgr); + iceland_start_smc(hwmgr); - SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, +static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, const uint8_t *src, uint32_t limit, uint32_t start_addr) { @@ -96,17 +96,17 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); while (byte_count >= 4) { data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; } - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -114,16 +114,16 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, } -static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr) { uint32_t val; struct cgs_firmware_info info = {0}; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; /* load SMC firmware */ - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (info.image_size & 3) { @@ -137,56 +137,56 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) } /* wait for smc boot up */ - SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, 
SMC_IND, + SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* clear firmware interrupt enable flag */ - val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL, val | 1); /* stop smc clock */ - iceland_stop_smc_clock(smumgr); + iceland_stop_smc_clock(hwmgr); /* reset smc */ - iceland_reset_smc(smumgr); - iceland_upload_smc_firmware_data(smumgr, info.image_size, + iceland_reset_smc(hwmgr); + iceland_upload_smc_firmware_data(hwmgr, info.image_size, (uint8_t *)info.kptr, ICELAND_SMC_SIZE, info.ucode_start_address); return 0; } -static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, +static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr, uint32_t firmwareType) { return 0; } -static int iceland_start_smu(struct pp_smumgr *smumgr) +static int iceland_start_smu(struct pp_hwmgr *hwmgr) { int result; - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { pr_info("smu not running, upload firmware again \n"); - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; } - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -198,7 +198,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. * @param value to write to the SMC SRAM. 
*/ -static int iceland_smu_init(struct pp_smumgr *smumgr) +static int iceland_smu_init(struct pp_hwmgr *hwmgr) { int i; struct iceland_smumgr *iceland_priv = NULL; @@ -208,9 +208,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr) if (iceland_priv == NULL) return -ENOMEM; - smumgr->backend = iceland_priv; + hwmgr->smumgr->backend = iceland_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 2d444bb4802a2..118315186bc3d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -231,7 +231,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -315,7 +315,7 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -358,7 +358,7 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -484,7 +484,6 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct pp_smumgr *smumgr = hwmgr->smumgr; state->CcPwrDynRm = 0; state->CcPwrDynRm1 = 0; @@ -493,7 +492,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker) + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; else state->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; @@ -546,8 +545,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, SMU74_Discrete_DpmTable *table) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); uint32_t i, ref_clk; struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; @@ -597,8 +595,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, uint32_t clock, SMU_SclkSetting *sclk_setting) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct pp_atomctrl_clock_dividers_ai dividers; uint32_t ref_clock; @@ -741,9 +738,8 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, */ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -828,7 +824,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -890,9 +886,8 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, */ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -933,7 +928,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1191,9 +1186,8 @@ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); struct SMU74_Discrete_MCArbDramTimingTable 
arb_regs; uint32_t i, j; int result = 0; @@ -1212,7 +1206,7 @@ static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU74_Discrete_MCArbDramTimingTable), @@ -1311,9 +1305,8 @@ static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1344,8 +1337,7 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1472,8 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); int result = 0; @@ -1524,20 +1515,20 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); } - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_meanNsigma, sizeof(AVFS_meanNsigma_t), SMC_RAM_END); - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_SclkOffset, sizeof(AVFS_Sclk_Offset_t), @@ -1559,9 +1550,9 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) * @param hwmgr the address of the powerplay hardware manager. * @return always 0 */ -static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) +static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); uint32_t tmp; int result; @@ -1573,7 +1564,7 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. 
*/ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1582,7 +1573,7 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1648,9 +1639,8 @@ static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1842,7 +1832,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1851,7 +1841,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = polaris10_init_arb_table_index(hwmgr->smumgr); + result = polaris10_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -1878,17 +1868,16 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return 0; - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); - ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? + ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ? 0 : -1; if (!ret) @@ -1990,20 +1979,20 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. 
advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2041,7 +2030,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2073,7 +2062,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2100,7 +2089,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2164,7 +2153,7 @@ int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2258,7 +2247,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2268,7 +2257,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2280,7 +2269,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2288,7 +2277,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2298,7 +2287,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2308,7 +2297,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = 
smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2349,6 +2338,6 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 75f43dadc56ba..55ba76202aa34 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -60,21 +60,21 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; -static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) +static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -93,13 +93,13 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int polaris10_perform_btc(struct pp_smumgr *smumgr) +static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -107,16 +107,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) if (smu_data->avfs.avfs_btc_param > 1) { /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); } return result; } -static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { uint32_t vr_config; uint32_t dpm_table_start; @@ -127,7 +127,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_size = sizeof(avfs_graphics_level_polaris10); u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + 
offsetof(SMU74_Firmware_Header, DpmTable), &dpm_table_start, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", @@ -138,14 +138,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address, (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", return -1); graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_graphics_level_polaris10), graphics_level_size, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", @@ -153,7 +153,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", return -1); @@ -162,7 +162,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", return -1); @@ -172,9 +172,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) static int -polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) +polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -183,20 +183,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */ smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", return -EINVAL); if (smu_data->avfs.avfs_btc_param > 1) { pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. 
There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", return -EINVAL); } smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled", return -EINVAL); smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; @@ -215,7 +215,7 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) return 0; } -static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; @@ -223,138 +223,138 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /* Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait done bit to be set */ /* Check pass/failed indicator */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) 
+static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu(struct pp_smumgr *smumgr) +static int polaris10_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { SMU_VFT_INTACT = false; - smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { - result = polaris10_start_smu_in_non_protection_mode(smumgr); + result = polaris10_start_smu_in_non_protection_mode(hwmgr); } else { - result = polaris10_start_smu_in_protection_mode(smumgr); + result = polaris10_start_smu_in_protection_mode(hwmgr); /* If failed, try with different security Key. 
*/ if (result != 0) { smu_data->smu7_data.security_hard_key ^= 1; - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - result = polaris10_start_smu_in_protection_mode(smumgr); + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); + result = polaris10_start_smu_in_protection_mode(hwmgr); } } if (result != 0) PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); - polaris10_avfs_event_mgr(smumgr, true); + polaris10_avfs_event_mgr(hwmgr, true); } else SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ - polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); + polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT); /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &(smu_data->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse; - efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); efuse &= 0x00000001; if (efuse) return true; @@ -362,7 +362,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) return false; } -static int polaris10_smu_init(struct pp_smumgr *smumgr) +static int polaris10_smu_init(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data; int i; @@ -371,9 +371,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) if (smu_data == NULL) return -ENOMEM; - smumgr->backend = smu_data; + hwmgr->smumgr->backend = smu_data; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index ce0a30388ea11..54d3052dd1571 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c @@ -48,20 +48,20 @@ #define smnMP1_FIRMWARE_FLAGS 0x3010028 -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) return false; } -static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + 
smum_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } -int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } -int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smumgr->backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL;); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, 
priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, return 0; } -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smumgr->backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -static int rv_verify_smc_interface(struct pp_smumgr *smumgr) +static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr, &smc_driver_if_version), "Attempt to read SMC IF Version Number Failed!", return -EINVAL); @@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr) } /* sdma is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma), "Attempt to power up sdma Failed!", return -EINVAL); @@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma), "Attempt to power down sdma Failed!", return -EINVAL); @@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) } /* vcn is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_enable_vcn(struct 
pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0), "Attempt to power up vcn Failed!", return -EINVAL); @@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0), "Attempt to power down vcn Failed!", return -EINVAL); @@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smu_fini(struct pp_smumgr *smumgr) +static int rv_smu_fini(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smumgr->backend); if (priv) { - rv_smc_disable_sdma(smumgr); - rv_smc_disable_vcn(smumgr); - cgs_free_gpu_mem(smumgr->device, + rv_smc_disable_sdma(hwmgr); + rv_smc_disable_vcn(hwmgr); + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[CLOCKTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; } return 0; } -static int rv_start_smu(struct pp_smumgr *smumgr) +static int rv_start_smu(struct pp_hwmgr *hwmgr) { - if (rv_verify_smc_interface(smumgr)) + if (rv_verify_smc_interface(hwmgr)) return -EINVAL; - if (rv_smc_enable_sdma(smumgr)) + if (rv_smc_enable_sdma(hwmgr)) return -EINVAL; - if (rv_smc_enable_vcn(smumgr)) + if (rv_smc_enable_vcn(hwmgr)) return -EINVAL; return 0; } -static int rv_smu_init(struct pp_smumgr *smumgr) +static int rv_smu_init(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv; uint64_t mc_addr; @@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smumgr->backend = priv; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; return -EINVAL); priv->smu_tables.entry[WMTABLE].version = 0x01; @@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(DpmClocks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for CLOCKTABLE.", - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; return -EINVAL); priv->smu_tables.entry[CLOCKTABLE].version = 0x01; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c01..58888400f1b8d 100644 --- 
a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -51,11 +51,11 @@ struct rv_smumgr { struct smu_table_array smu_tables; }; -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr); -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index f128b03f2327c..cd283e5af68ca 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -34,18 +34,18 @@ #define SMU7_SMC_SIZE 0x20000 -static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) +static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit) { PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ return 0; } -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) { uint32_t data; uint32_t addr; @@ -59,7 +59,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres addr = smc_start_address; while (byte_count >= 4) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *dest = PP_SMC_TO_HOST_UL(data); @@ -69,7 +69,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } if (byte_count) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *pdata = PP_SMC_TO_HOST_UL(data); /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ dest_byte = (uint8_t *)dest; @@ -81,7 +81,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { int result; @@ -99,12 +99,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, /* Bytes are written into the SMC addres space with the MSB first. 
*/ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); src += 4; byte_count -= 4; @@ -115,13 +115,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data = 0; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); extra_shift = 8 * (4 - byte_count); @@ -135,53 +135,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data |= (original_data & ~((~0UL) << extra_shift)); - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); } return 0; } -int smu7_program_jump_on_start(struct pp_smumgr *smumgr) +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr) { static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); return 0; } -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr) +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); @@ -189,53 +189,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) return 0; } -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); return 0; } -int smu7_send_msg_to_smc_with_parameter(struct 
pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { return -EINVAL; } - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc(smumgr, msg); + return smu7_send_msg_to_smc(hwmgr, msg); } -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc_without_waiting(smumgr, msg); + return smu7_send_msg_to_smc_without_waiting(hwmgr, msg); } -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) + if (1 != SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) pr_info("Failed to send Message.\n"); return 0; } -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr) +int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) { - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); return 0; } @@ -289,29 +289,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) } -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); return 0; } -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value); return 0; } @@ -354,14 +354,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) return result; } -static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, +static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, uint32_t fw_type, 
struct SMU_Entry *entry) { int result = 0; struct cgs_firmware_info info = {0}; - result = cgs_get_firmware_info(smumgr->device, + result = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(fw_type), &info); @@ -374,7 +374,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->meta_data_addr_low = 0; /* digest need be excluded out */ - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) info.image_size -= 20; entry->data_size_byte = info.image_size; entry->num_register_entries = 0; @@ -389,30 +389,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, return 0; } -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; - if (!smumgr->reload_fw) { + if (!hwmgr->smumgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } if (smu_data->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), 0x0); - if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ - if (!cgs_is_virtualization_enabled(smumgr->device)) { - smu7_send_msg_to_smc_with_parameter(smumgr, + if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ + if (!cgs_is_virtualization_enabled(hwmgr->device)) { + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); } @@ -439,80 +439,80 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) toc->num_entries = 0; toc->structure_version = 1; - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", 
return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + if (cgs_is_virtualization_enabled(hwmgr->device)) + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); - if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) + if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) pr_err("Fail to Request SMU Load uCode"); return result; } /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; - ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + ret = smum_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), fw_mask, fw_mask); return ret; } -int smu7_reload_firmware(struct pp_smumgr *smumgr) +int smu7_reload_firmware(struct pp_hwmgr *hwmgr) { - return smumgr->smumgr_funcs->start_smu(smumgr); + return hwmgr->smumgr->smumgr_funcs->start_smu(hwmgr); } -static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) +static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit) { uint32_t byte_count = length; PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); for (; byte_count >= 4; byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -520,41 +520,41 @@ static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t leng } -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); struct cgs_firmware_info info = {0}; if (smu_data->security_hard_key == 1) - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); else - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - smumgr->is_kicker = info.is_kicker; + hwmgr->smumgr->is_kicker = info.is_kicker; - result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); + result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); return result; } -int smu7_init(struct pp_smumgr *smumgr) +int smu7_init(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data; uint8_t *internal_buf; uint64_t mc_addr = 0; /* Allocate memory for backend private data */ - smu_data = (struct smu7_smumgr *)(smumgr->backend); + smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); smu_data->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; /* Allocate FW image data structure and header buffer and * send the header buffer address to 
SMU */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->header_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -568,16 +568,16 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != smu_data->header), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->header_buffer.handle); return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; smu_data->smu_buffer.data_size = 200*4096; - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->smu_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -591,12 +591,12 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != internal_buf), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); - if (smum_is_hw_avfs_present(smumgr)) + if (smum_is_hw_avfs_present(hwmgr)) smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; else smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; @@ -605,10 +605,10 @@ int smu7_init(struct pp_smumgr *smumgr) } -int smu7_smu_fini(struct pp_smumgr *smumgr) +int smu7_smu_fini(struct pp_hwmgr *hwmgr) { - kfree(smumgr->backend); - smumgr->backend = NULL; - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index ee5e32d2921ed..0b63c5c1043cf 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -60,32 +60,32 @@ struct smu7_smumgr { }; -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit); -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); -int smu7_program_jump_on_start(struct pp_smumgr *smumgr); -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr); -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr); -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr); +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); +int 
smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit); -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr); -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type); -int smu7_reload_firmware(struct pp_smumgr *smumgr); -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr); -int smu7_init(struct pp_smumgr *smumgr); -int smu7_smu_fini(struct pp_smumgr *smumgr); +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr); +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type); +int smu7_reload_firmware(struct pp_hwmgr *hwmgr); +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr); +int smu7_init(struct pp_hwmgr *hwmgr); +int smu7_smu_fini(struct pp_hwmgr *hwmgr); #endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 9c1738f991b66..a58346e78c35d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -151,10 +151,10 @@ int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) return 0; } -uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member) +uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member) { - if (NULL != smumgr->smumgr_funcs->get_offsetof) - return smumgr->smumgr_funcs->get_offsetof(type, member); + if (NULL != hwmgr->smumgr->smumgr_funcs->get_offsetof) + return hwmgr->smumgr->smumgr_funcs->get_offsetof(type, member); return 0; } @@ -166,97 +166,96 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr) return 0; } -int smum_get_argument(struct pp_smumgr *smumgr) +int smum_get_argument(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->get_argument) - return smumgr->smumgr_funcs->get_argument(smumgr); + if (NULL != hwmgr->smumgr->smumgr_funcs->get_argument) + return hwmgr->smumgr->smumgr_funcs->get_argument(hwmgr); return 0; } -uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value) +uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) { - if (NULL != smumgr->smumgr_funcs->get_mac_definition) - return smumgr->smumgr_funcs->get_mac_definition(value); + if (NULL != hwmgr->smumgr->smumgr_funcs->get_mac_definition) + return hwmgr->smumgr->smumgr_funcs->get_mac_definition(value); return 0; } -int smum_download_powerplay_table(struct pp_smumgr *smumgr, - void **table) +int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table) { - if (NULL != smumgr->smumgr_funcs->download_pptable_settings) - return smumgr->smumgr_funcs->download_pptable_settings(smumgr, + if (NULL != hwmgr->smumgr->smumgr_funcs->download_pptable_settings) + return hwmgr->smumgr->smumgr_funcs->download_pptable_settings(hwmgr, table); return 0; } -int smum_upload_powerplay_table(struct pp_smumgr *smumgr) +int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->upload_pptable_settings) - return smumgr->smumgr_funcs->upload_pptable_settings(smumgr); + if (NULL != hwmgr->smumgr->smumgr_funcs->upload_pptable_settings) + return 
hwmgr->smumgr->smumgr_funcs->upload_pptable_settings(hwmgr); return 0; } -int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL) + if (hwmgr == NULL || hwmgr->smumgr->smumgr_funcs->send_msg_to_smc == NULL) return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg); + return hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); } -int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || - smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) + if (hwmgr == NULL || + hwmgr->smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter( - smumgr, msg, parameter); + return hwmgr->smumgr->smumgr_funcs->send_msg_to_smc_with_parameter( + hwmgr, msg, parameter); } /* * Returns once the part of the register indicated by the mask has * reached the given value. */ -int smum_wait_on_register(struct pp_smumgr *smumgr, +int smum_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask) { uint32_t i; uint32_t cur_value; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, index); + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, index); if ((cur_value & mask) == (value & mask)) break; udelay(1); } /* timeout means wrong logic*/ - if (i == smumgr->usec_timeout) + if (i == hwmgr->usec_timeout) return -1; return 0; } -int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, +int smum_wait_for_register_unequal(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask) { uint32_t i; uint32_t cur_value; - if (smumgr == NULL) + if (hwmgr == NULL) return -EINVAL; - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, index); if ((cur_value & mask) != (value & mask)) break; @@ -264,7 +263,7 @@ int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, } /* timeout means wrong logic */ - if (i == smumgr->usec_timeout) + if (i == hwmgr->usec_timeout) return -1; return 0; @@ -276,31 +275,31 @@ int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, * has reached the given value.The indirect space is described by * giving the memory-mapped index of the indirect index register. 
*/ -int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, +int smum_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, indirect_port, index); - return smum_wait_on_register(smumgr, indirect_port + 1, + cgs_write_register(hwmgr->device, indirect_port, index); + return smum_wait_on_register(hwmgr, indirect_port + 1, mask, value); } void smum_wait_for_indirect_register_unequal( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return; - cgs_write_register(smumgr->device, indirect_port, index); - smum_wait_for_register_unequal(smumgr, indirect_port + 1, + cgs_write_register(hwmgr->device, indirect_port, index); + smum_wait_for_register_unequal(hwmgr, indirect_port + 1, value, mask); } @@ -406,10 +405,10 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, return 0; } -bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr) +bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { - if (smumgr->smumgr_funcs->is_hw_avfs_present) - return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr); + if (hwmgr->smumgr->smumgr_funcs->is_hw_avfs_present) + return hwmgr->smumgr->smumgr_funcs->is_hw_avfs_present(hwmgr); return false; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index a628eec5e6dac..68e1e19b971b9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -690,7 +690,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1048,7 +1048,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1475,7 +1475,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU72_Discrete_MCArbDramTimingTable), @@ -1782,9 +1782,9 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, * @param hwmgr the address of the powerplay hardware manager. 
* @return always 0 */ -static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) +static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); uint32_t tmp; int result; @@ -1797,7 +1797,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result != 0) @@ -1806,7 +1806,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1901,7 +1901,7 @@ static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -1987,7 +1987,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -2038,7 +2038,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Hi and Lo Sidd Failed !", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -2048,10 +2048,10 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) { - const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend; + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smumgr->backend; uint32_t i, j; @@ -2092,12 +2092,12 @@ static void tonga_convert_mc_registers( } static int tonga_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU72_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -2127,7 +2127,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = tonga_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -2141,8 +2141,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr 
*smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -2163,7 +2162,7 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); return smu7_copy_bytes_to_smc( - hwmgr->smumgr, address, + hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, @@ -2173,11 +2172,10 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); - result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(!result, "Failed to initialize MCRegTable for the MC register addresses !", return result;); @@ -2187,7 +2185,7 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) "Failed to initialize MCRegTable for driver state !", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); } @@ -2471,7 +2469,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), @@ -2480,7 +2478,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to upload dpm data to SMC memory !", return result;); - result = tonga_init_arb_table_index(hwmgr->smumgr); + result = tonga_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to upload arb data to SMC memory !", return result); @@ -2588,7 +2586,7 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) fan_table.FanControl_GL_Flag = 1; - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), @@ -2630,7 +2628,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2741,7 +2739,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2772,7 +2770,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2798,7 +2796,7 @@ static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2838,7 +2836,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2848,7 +2846,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2861,7 +2859,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2869,7 +2867,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2879,7 +2877,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2889,7 +2887,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -3258,6 +3256,6 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index c35f4c35c9cad..c10e6f89dbae2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -37,125 +37,125 @@ #include "smu7_smumgr.h" -static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) { int result; /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = 
smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { pr_err("SMU Firmware start failed\n"); return -EINVAL; } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /*Clear firmware interrupt enable flag*/ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /*De-assert reset*/ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int tonga_start_smu(struct pp_smumgr *smumgr) +static int tonga_start_smu(struct pp_hwmgr *hwmgr) { int result; /* Only start SMC if SMC RAM is not running */ - if 
(!(smu7_is_smc_ram_running(smumgr) || - cgs_is_virtualization_enabled(smumgr->device))) { + if (!(smu7_is_smc_ram_running(hwmgr) || + cgs_is_virtualization_enabled(hwmgr->device))) { /*Check if SMU is running in protected mode*/ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = tonga_start_in_non_protection_mode(smumgr); + result = tonga_start_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = tonga_start_in_protection_mode(smumgr); + result = tonga_start_in_protection_mode(hwmgr); if (result) return result; } } - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -167,7 +167,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. * @param value to write to the SMC SRAM. */ -static int tonga_smu_init(struct pp_smumgr *smumgr) +static int tonga_smu_init(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *tonga_priv = NULL; int i; @@ -176,9 +176,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr) if (tonga_priv == NULL) return -ENOMEM; - smumgr->backend = tonga_priv; + hwmgr->smumgr->backend = tonga_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 408514c965a01..c63127058090c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -53,20 +53,20 @@ #define smnMP0_FW_INTF 0x3010104 #define smnMP1_PUB_CTRL 0x3010b14 -static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) +static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) * @param smumgr the address of the powerplay hardware manager. * @return TRUE SMC has responded, FALSE otherwise. */ -static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + smum_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } /* @@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) * @param msg the message to send. * @return Always return 0. 
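
For readability, the tonga_start_smu() hunks above collapse to roughly the following once applied. This is a sketch reconstructed from the diff; the protection-mode helpers themselves are unchanged apart from now taking hwmgr:

static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
	int result;

	/* Only start the SMC if its RAM is not already running and we
	 * are not virtualized; otherwise just request the firmware load.
	 */
	if (!(smu7_is_smc_ram_running(hwmgr) ||
			cgs_is_virtualization_enabled(hwmgr->device))) {
		/* SMU_FIRMWARE.SMU_MODE == 0 means non-protected mode */
		if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) {
			result = tonga_start_in_non_protection_mode(hwmgr);
			if (result)
				return result;
		} else {
			result = tonga_start_in_protection_mode(hwmgr);
			if (result)
				return result;
		}
	}

	return smu7_request_smu_load_fw(hwmgr);
}
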
*/ -int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } /* * Send a message to the SMC, and wait for its response. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param msg the message to send. * @return Always return 0. */ -int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) /* * Send a message to the SMC with parameter - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return Always return 0. */ -int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, /* * Send a message to the SMC with parameter, do not wait for response - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return The response that came from the SMC. 
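
Applied, the parameterized message path above reads approximately as follows. This is a sketch reconstructed from the hunks in this patch; the C2PMSG register roles are as used there (C2PMSG_90 carries the response, C2PMSG_82 the parameter, C2PMSG_66 the message ID):

int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	uint32_t reg;

	if (!vega10_is_smc_ram_running(hwmgr))
		return -EINVAL;

	/* Drain any pending response, then clear the response register. */
	vega10_wait_for_response(hwmgr);
	reg = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
	cgs_write_register(hwmgr->device, reg, 0);

	/* The parameter goes into C2PMSG_82 before the message is sent. */
	reg = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
	cgs_write_register(hwmgr->device, reg, parameter);

	vega10_send_msg_to_smc_without_waiting(hwmgr, msg);

	/* The SMC acknowledges by writing 1 back into C2PMSG_90. */
	if (vega10_wait_for_response(hwmgr) != 1)
		pr_err("Failed to send message: 0x%x\n", msg);

	return 0;
}
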
*/ int vega10_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) + struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - return vega10_send_msg_to_smc_without_waiting(smumgr, msg); + return vega10_send_msg_to_smc_without_waiting(hwmgr, msg); } /* * Retrieve an argument from SMC. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param arg pointer to store the argument from SMC. * @return Always return 0. */ -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } /* * Copy table from SMC into driver FB - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the driver's table ID to copy from */ -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smumgr->backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, /* * Copy table from Driver FB into SMC - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the table to copy from */ -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smumgr->backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, 
table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask) { int msg = enable ? 
PPSMC_MSG_EnableSmuFeatures : PPSMC_MSG_DisableSmuFeatures; - return vega10_send_msg_to_smc_with_parameter(smumgr, + return vega10_send_msg_to_smc_with_parameter(hwmgr, msg, feature_mask); } -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled) { if (features_enabled == NULL) return -EINVAL; - if (!vega10_send_msg_to_smc(smumgr, + if (!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures)) { - vega10_read_arg_from_smc(smumgr, features_enabled); + vega10_read_arg_from_smc(hwmgr, features_enabled); return 0; } return -EINVAL; } -int vega10_set_tools_address(struct pp_smumgr *smumgr) +int vega10_set_tools_address(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smumgr->backend); if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { - if (!vega10_send_msg_to_smc_with_parameter(smumgr, + if (!vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, priv->smu_tables.entry[TOOLSTABLE].table_addr_high)) - vega10_send_msg_to_smc_with_parameter(smumgr, + vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, priv->smu_tables.entry[TOOLSTABLE].table_addr_low); } return 0; } -static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) +static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; struct cgs_system_info sys_info = {0}; uint32_t dev_id; uint32_t rev_id; - PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - vega10_read_arg_from_smc(smumgr, &smc_driver_if_version); + vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version); sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); dev_id = (uint32_t)sys_info.value; sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); rev_id = (uint32_t)sys_info.value; if (!((dev_id == 0x687f) && @@ -392,7 +392,7 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_init(struct pp_smumgr *smumgr) +static int vega10_smu_init(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv; uint64_t mc_addr; @@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) int ret; struct cgs_firmware_info info = {0}; - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (ret || !info.kptr) @@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smumgr->backend = priv; /* allocate space for pptable */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(PPTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for pptable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, 
(cgs_handle_t)handle); return -EINVAL); @@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[PPTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for AVFS table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) tools_size = 0x19000; if (tools_size) { - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, tools_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) } /* allocate space for AVFS Fuse table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsFuseOverride_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs fuse table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smumgr->backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_fini(struct pp_smumgr *smumgr) +static int vega10_smu_fini(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smumgr->backend); if (priv) { - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, 
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); if (priv->smu_tables.entry[TOOLSTABLE].table) - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smumgr->backend); + hwmgr->smumgr->backend = NULL; } return 0; } -static int vega10_start_smu(struct pp_smumgr *smumgr) +static int vega10_start_smu(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr), + PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), "Failed to verify SMC interface!", return -EINVAL); - vega10_set_tools_address(smumgr); + vega10_set_tools_address(hwmgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h index 821425c1e4e0b..0695455b21b22 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h @@ -52,19 +52,19 @@ struct vega10_smumgr { struct smu_table_array smu_tables; }; -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask); -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled); -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); -int vega10_set_tools_address(struct pp_smumgr *smumgr); +int vega10_set_tools_address(struct pp_hwmgr *hwmgr); #endif From 221c89f980ea96a6baf80b17b6c6a618fc366e73 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 15:41:33 +0800 Subject: [PATCH 214/232] drm/amd/powerplay: delete dead code in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index fa83e69ba9e13..1b212b57edc04 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -62,10 +62,6 @@ struct vi_dpm_table { struct vi_dpm_level dpm_level[1]; }; -enum PP_Result { - PP_Result_TableImmediateExit = 0x13, -}; - #define PCIE_PERF_REQ_REMOVE_REGISTRY 0 #define PCIE_PERF_REQ_FORCE_LOWPOWER 1 #define PCIE_PERF_REQ_GEN1 2 @@ -104,13 +100,6 @@ enum PHM_BackEnd_Magic { PHM_Rv_Magic = 0x20161121 }; - -#define PHM_PCIE_POWERGATING_TARGET_GFX 0 -#define PHM_PCIE_POWERGATING_TARGET_DDI 1 -#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2 
-#define PHM_PCIE_POWERGATING_TARGET_PHY 3 - - struct phm_set_power_state_input { const struct pp_hw_power_state *pcurrent_state; const struct pp_hw_power_state *pnew_state; From b3b030520df05fca7f2dcca455c9628f483a1f95 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 26 Sep 2017 13:28:27 -0400 Subject: [PATCH 215/232] drm/amd/powerplay: refine powerplay code. delete struct smumgr, put smu backend function table in struct hwmgr Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 62 +++----- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 19 ++- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 +- .../drm/amd/powerplay/hwmgr/smu7_powertune.c | 2 +- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 40 +++++ .../gpu/drm/amd/powerplay/inc/pp_instance.h | 2 - drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 62 +------- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 58 +++---- .../gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 6 +- .../gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 38 ++--- .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 54 +++---- .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 10 +- .../drm/amd/powerplay/smumgr/iceland_smc.c | 50 +++--- .../drm/amd/powerplay/smumgr/iceland_smumgr.c | 2 +- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 67 ++++---- .../amd/powerplay/smumgr/polaris10_smumgr.c | 10 +- .../gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 20 +-- .../drm/amd/powerplay/smumgr/smu7_smumgr.c | 22 +-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 148 +++++------------- .../gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 64 ++++---- .../drm/amd/powerplay/smumgr/tonga_smumgr.c | 2 +- .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 22 +-- 22 files changed, 339 insertions(+), 425 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index c37ea9543ca38..9f3f3b8cf64f2 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -35,13 +35,13 @@ static inline int pp_check(struct pp_instance *handle) if (handle == NULL || handle->pp_valid != PP_VALID) return -EINVAL; - if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL) + if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) return -EINVAL; if (handle->pm_en == 0) return PP_DPM_DISABLED; - if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL) + if (handle->hwmgr->hwmgr_func == NULL) return PP_DPM_DISABLED; return 0; @@ -52,38 +52,32 @@ static int pp_early_init(void *handle) int ret; struct pp_instance *pp_handle = (struct pp_instance *)handle; - ret = smum_early_init(pp_handle); + ret = hwmgr_early_init(pp_handle); if (ret) - return ret; + return -EINVAL; if ((pp_handle->pm_en == 0) || cgs_is_virtualization_enabled(pp_handle->device)) return PP_DPM_DISABLED; - ret = hwmgr_early_init(pp_handle); - if (ret) { - pp_handle->pm_en = 0; - return PP_DPM_DISABLED; - } - return 0; } static int pp_sw_init(void *handle) { - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret == 0 || ret == PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->smu_init == NULL) + if (hwmgr->smumgr_funcs->smu_init == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_init(pp_handle->hwmgr); + ret = hwmgr->smumgr_funcs->smu_init(hwmgr); pr_info("amdgpu: powerplay sw initialized\n"); } @@ -92,39 
+86,39 @@ static int pp_sw_init(void *handle) static int pp_sw_fini(void *handle) { - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; ret = pp_check(pp_handle); if (ret == 0 || ret == PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->smu_fini == NULL) + if (hwmgr->smumgr_funcs->smu_fini == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); + ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); } return ret; } static int pp_hw_init(void *handle) { - struct pp_smumgr *smumgr; int ret = 0; struct pp_instance *pp_handle = (struct pp_instance *)handle; + struct pp_hwmgr *hwmgr; ret = pp_check(pp_handle); if (ret == 0 || ret == PP_DPM_DISABLED) { - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->start_smu == NULL) + if (hwmgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - if(smumgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { + if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); + hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return -EINVAL;; } if (ret == PP_DPM_DISABLED) @@ -137,8 +131,6 @@ static int pp_hw_init(void *handle) return 0; err: pp_handle->pm_en = 0; - kfree(pp_handle->hwmgr); - pp_handle->hwmgr = NULL; return PP_DPM_DISABLED; } @@ -232,7 +224,7 @@ static int pp_suspend(void *handle) static int pp_resume(void *handle) { - struct pp_smumgr *smumgr; + struct pp_hwmgr *hwmgr; int ret, ret1; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -241,15 +233,15 @@ static int pp_resume(void *handle) if (ret1 != 0 && ret1 != PP_DPM_DISABLED) return ret1; - smumgr = pp_handle->smu_mgr; + hwmgr = pp_handle->hwmgr; - if (smumgr->smumgr_funcs->start_smu == NULL) + if (hwmgr->smumgr_funcs->start_smu == NULL) return -EINVAL; - ret = smumgr->smumgr_funcs->start_smu(pp_handle->hwmgr); + ret = hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr); if (ret) { pr_err("smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); + hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); return ret; } @@ -1157,13 +1149,9 @@ int amd_powerplay_destroy(void *handle) { struct pp_instance *instance = (struct pp_instance *)handle; - if (instance->pm_en) { - kfree(instance->hwmgr); - instance->hwmgr = NULL; - } + kfree(instance->hwmgr); + instance->hwmgr = NULL; - kfree(instance->smu_mgr); - instance->smu_mgr = NULL; kfree(instance); instance = NULL; return 0; @@ -1174,7 +1162,7 @@ int amd_powerplay_reset(void *handle) struct pp_instance *instance = (struct pp_instance *)handle; int ret; - if (cgs_is_virtualization_enabled(instance->smu_mgr->device)) + if (cgs_is_virtualization_enabled(instance->hwmgr->device)) return PP_DPM_DISABLED; ret = pp_check(instance); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 16101c392c3b9..9c1479dcf79ce 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -37,6 +37,15 @@ #include "amd_acpi.h" #include "pp_psm.h" +extern const struct pp_smumgr_func ci_smu_funcs; +extern const struct pp_smumgr_func cz_smu_funcs; +extern const struct pp_smumgr_func iceland_smu_funcs; +extern const struct pp_smumgr_func tonga_smu_funcs; +extern const struct pp_smumgr_func fiji_smu_funcs; +extern const struct pp_smumgr_func polaris10_smu_funcs; +extern const struct pp_smumgr_func 
vega10_smu_funcs; +extern const struct pp_smumgr_func rv_smu_funcs; + extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); @@ -132,7 +141,6 @@ int hwmgr_early_init(struct pp_instance *handle) return -ENOMEM; handle->hwmgr = hwmgr; - hwmgr->smumgr = handle->smu_mgr; hwmgr->device = handle->device; hwmgr->chip_family = handle->chip_family; hwmgr->chip_id = handle->chip_id; @@ -144,9 +152,11 @@ int hwmgr_early_init(struct pp_instance *handle) hwmgr_init_default_caps(hwmgr); hwmgr_set_user_specify_caps(hwmgr); hwmgr->fan_ctrl_is_in_default_mode = true; + hwmgr->reload_fw = 1; switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CI: + hwmgr->smumgr_funcs = &ci_smu_funcs; ci_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); @@ -154,21 +164,25 @@ int hwmgr_early_init(struct pp_instance *handle) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_CZ: + hwmgr->smumgr_funcs = &cz_smu_funcs; cz_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TOPAZ: + hwmgr->smumgr_funcs = &iceland_smu_funcs; topaz_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); hwmgr->pp_table_version = PP_TABLE_V0; break; case CHIP_TONGA: + hwmgr->smumgr_funcs = &tonga_smu_funcs; tonga_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK; break; case CHIP_FIJI: + hwmgr->smumgr_funcs = &fiji_smu_funcs; fiji_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); @@ -176,6 +190,7 @@ int hwmgr_early_init(struct pp_instance *handle) case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: + hwmgr->smumgr_funcs = &polaris10_smu_funcs; polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; @@ -187,6 +202,7 @@ int hwmgr_early_init(struct pp_instance *handle) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: + hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); break; default: @@ -196,6 +212,7 @@ int hwmgr_early_init(struct pp_instance *handle) case AMDGPU_FAMILY_RV: switch (hwmgr->chip_id) { case CHIP_RAVEN: + hwmgr->smumgr_funcs = &rv_smu_funcs; rv_init_function_pointers(hwmgr); break; default: diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index bc2f227559cdf..8dbe9148aad35 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1382,7 +1382,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->force_pcie_gen = PP_PCIEGenInvalid; data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? 
true : false; - if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) { + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) { uint8_t tmp1, tmp2; uint16_t tmp3 = 0; atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, @@ -4623,7 +4623,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (smu_data == NULL) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index a20d67a78b70e..85ca16abb626f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -763,7 +763,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - if (hwmgr->smumgr->is_kicker) + if (hwmgr->is_kicker) result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); else result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 1b212b57edc04..859cca496b446 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -235,6 +235,39 @@ struct phm_vce_clock_voltage_dependency_table { struct phm_vce_clock_voltage_dependency_record entries[1]; }; +struct pp_smumgr_func { + int (*smu_init)(struct pp_hwmgr *hwmgr); + int (*smu_fini)(struct pp_hwmgr *hwmgr); + int (*start_smu)(struct pp_hwmgr *hwmgr); + int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); + int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + int (*get_argument)(struct pp_hwmgr *hwmgr); + int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); + int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); + int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, + void **table); + int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); + int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); + int (*process_firmware_header)(struct pp_hwmgr *hwmgr); + int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); + int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); + int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); + int (*init_smc_table)(struct pp_hwmgr *hwmgr); + int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); + int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); + int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); + uint32_t (*get_offsetof)(uint32_t type, uint32_t member); + uint32_t (*get_mac_definition)(uint32_t value); + bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); + int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); +}; + struct pp_hwmgr_func { int (*backend_init)(struct pp_hwmgr *hw_mgr); int (*backend_fini)(struct pp_hwmgr *hw_mgr); @@ -706,10 +739,17 @@ struct pp_hwmgr { void *pptable; struct phm_platform_descriptor platform_descriptor; void *backend; + + void *smu_backend; + const 
struct pp_smumgr_func *smumgr_funcs; + bool is_kicker; + bool reload_fw; + enum PP_DAL_POWERLEVEL dal_power_level; struct phm_dynamic_state_info dyn_state; const struct pp_hwmgr_func *hwmgr_func; const struct pp_table_func *pptable_func; + struct pp_power_state *ps; enum pp_power_source power_source; uint32_t num_ps; diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h index 5bf2ee449e42c..25fb1460a1940 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -23,7 +23,6 @@ #ifndef _PP_INSTANCE_H_ #define _PP_INSTANCE_H_ -#include "smumgr.h" #include "hwmgr.h" #define PP_VALID 0x1F1F1F1F @@ -35,7 +34,6 @@ struct pp_instance { bool pm_en; uint32_t feature_mask; void *device; - struct pp_smumgr *smu_mgr; struct pp_hwmgr *hwmgr; struct mutex pp_lock; }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 58581e1bbf501..8bdffaa14b431 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -23,24 +23,13 @@ #ifndef _SMUMGR_H_ #define _SMUMGR_H_ #include -#include "pp_instance.h" #include "amd_powerplay.h" - -struct pp_smumgr; -struct pp_instance; -struct pp_hwmgr; +#include "hwmgr.h" #define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) -extern const struct pp_smumgr_func ci_smu_funcs; -extern const struct pp_smumgr_func cz_smu_funcs; -extern const struct pp_smumgr_func iceland_smu_funcs; -extern const struct pp_smumgr_func tonga_smu_funcs; -extern const struct pp_smumgr_func fiji_smu_funcs; -extern const struct pp_smumgr_func polaris10_smu_funcs; -extern const struct pp_smumgr_func vega10_smu_funcs; -extern const struct pp_smumgr_func rv_smu_funcs; + enum AVFS_BTC_STATUS { AVFS_BTC_BOOT = 0, @@ -101,53 +90,6 @@ enum SMU_MAC_DEFINITION { SMU_UVD_MCLK_HANDSHAKE_DISABLE, }; - -struct pp_smumgr_func { - int (*smu_init)(struct pp_hwmgr *hwmgr); - int (*smu_fini)(struct pp_hwmgr *hwmgr); - int (*start_smu)(struct pp_hwmgr *hwmgr); - int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, - uint32_t firmware); - int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); - int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, - uint32_t firmware); - int (*get_argument)(struct pp_hwmgr *hwmgr); - int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); - int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter); - int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, - void **table); - int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); - int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); - int (*process_firmware_header)(struct pp_hwmgr *hwmgr); - int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); - int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); - int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); - int (*init_smc_table)(struct pp_hwmgr *hwmgr); - int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); - int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); - int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); - uint32_t (*get_offsetof)(uint32_t type, uint32_t member); - uint32_t (*get_mac_definition)(uint32_t value); - bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); - int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, - struct amd_pp_profile *request); - bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); -}; - -struct pp_smumgr { - 
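
The caller-visible effect of dropping struct pp_smumgr is mechanical. As a minimal illustration of the before/after access paths, based on the smu7_hwmgr.c and amd_powerplay.c hunks in this patch (the helper names below are hypothetical and exist only to contrast the two forms):

/* Illustration only: before this patch the SMU state hung off a separate
 * pp_smumgr object reached through hwmgr->smumgr.
 */
static struct smu7_smumgr *get_smu_backend_old(struct pp_hwmgr *hwmgr)
{
	return (struct smu7_smumgr *)(hwmgr->smumgr->backend);
}

/* After this patch the backend pointer, the function table and flags such
 * as is_kicker live directly in struct pp_hwmgr.
 */
static struct smu7_smumgr *get_smu_backend_new(struct pp_hwmgr *hwmgr)
{
	return (struct smu7_smumgr *)(hwmgr->smu_backend);
}

static int start_smu_example(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->smumgr_funcs->start_smu == NULL)
		return -EINVAL;
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
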
uint32_t chip_family; - uint32_t chip_id; - void *device; - void *backend; - uint32_t usec_timeout; - bool reload_fw; - const struct pp_smumgr_func *smumgr_funcs; - bool is_kicker; -}; - -extern int smum_early_init(struct pp_instance *handle); - extern int smum_get_argument(struct pp_hwmgr *hwmgr); extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index 2710a6fa3df03..c2fc237a136a1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -236,7 +236,7 @@ int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); struct cgs_system_info sys_info = {0}; uint32_t dev_id; @@ -479,7 +479,7 @@ static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result = 0; uint32_t array = smu_data->dpm_table_start + @@ -520,7 +520,7 @@ int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -534,7 +534,7 @@ static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); @@ -549,7 +549,7 @@ static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; @@ -568,8 +568,8 @@ static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - uint16_t tmp = 0; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + uint16_t tmp; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) @@ -585,7 +585,7 @@ static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_off static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) { int i; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); 
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2; @@ -614,7 +614,7 @@ static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr) { int i; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint8_t *vid = smu_data->power_tune_table.VddCVid; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -630,7 +630,7 @@ static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr) static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; int i, min, max; @@ -662,7 +662,7 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; @@ -680,7 +680,7 @@ static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; int ret = 0; @@ -722,7 +722,7 @@ static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); @@ -997,7 +997,7 @@ static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmT { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/ @@ -1300,7 +1300,7 @@ static int ci_populate_single_memory_level( int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; struct cgs_system_info sys_info = {0}; @@ -1684,7 +1684,7 @@ static int ci_populate_memory_timing_parameters( static int ci_program_memory_timing_parameters(struct pp_hwmgr 
*hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); int result = 0; SMU7_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1721,7 +1721,7 @@ static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, { int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1759,7 +1759,7 @@ static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU7_Discrete_MCRegisters *mc_reg_table) { - const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smumgr->backend; + const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -1801,7 +1801,7 @@ static int ci_convert_mc_reg_table_entry_to_smc( SMU7_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1845,7 +1845,7 @@ static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1872,7 +1872,7 @@ static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters)); result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); @@ -1890,7 +1890,7 @@ static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint8_t count, level; count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); @@ -1948,7 +1948,7 @@ int ci_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct pp_atomctrl_gpio_pin_assignment gpio_pin; u32 i; @@ -2127,7 +2127,7 @@ int ci_init_smc_table(struct pp_hwmgr *hwmgr) int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2214,7 +2214,7 @@ static 
int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2311,7 +2311,7 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); - hwmgr->smumgr->is_kicker = info.is_kicker; + hwmgr->is_kicker = info.is_kicker; byte_count = info.image_size; src = (uint8_t *)info.kptr; start_addr = info.ucode_start_address; @@ -2358,7 +2358,7 @@ static int ci_upload_firmware(struct pp_hwmgr *hwmgr) int ci_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); uint32_t tmp = 0; int result; @@ -2670,7 +2670,7 @@ static int ci_set_valid_flag(struct ci_mc_reg_table *table) int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smumgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = ci_get_memory_modile_index(hwmgr); @@ -2731,7 +2731,7 @@ int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct ci_smumgr *smu_data = (struct ci_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU7_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->dpm_table_start + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index d2e24e3a963d7..f265f42a7ed38 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -43,15 +43,15 @@ static int ci_smu_init(struct pp_hwmgr *hwmgr) for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) ci_priv->activity_target[i] = 30; - hwmgr->smumgr->backend = ci_priv; + hwmgr->smu_backend = ci_priv; return 0; } static int ci_smu_fini(struct pp_hwmgr *hwmgr) { - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index a6fa0e86a8fd3..8aee9c817ff98 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -181,7 +181,7 @@ static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); @@ -330,7 +330,7 @@ static int cz_smu_populate_single_scratch_task( uint8_t type, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = 
&toc->tasks[cz_smu->toc_entry_used_count++]; @@ -367,7 +367,7 @@ static int cz_smu_populate_single_ucode_load_task( bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; @@ -393,7 +393,7 @@ static int cz_smu_populate_single_ucode_load_task( static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; cz_smu_populate_single_scratch_task(hwmgr, @@ -406,7 +406,7 @@ static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) { int i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) @@ -417,7 +417,7 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; @@ -435,7 +435,7 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; @@ -477,7 +477,7 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; @@ -489,7 +489,7 @@ static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; @@ -517,7 +517,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; @@ -530,7 +530,7 @@ static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_used_count = 0; 
cz_smu_initialize_toc_empty_job_list(hwmgr); @@ -546,7 +546,7 @@ static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; uint32_t firmware_type; uint32_t i; int ret; @@ -588,7 +588,7 @@ static int cz_smu_populate_single_scratch_entry( uint32_t ulsize_byte, struct cz_buffer_entry *entry) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; long long mc_addr = ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) | cz_smu->smu_buffer.mc_addr_low; @@ -611,7 +611,7 @@ static int cz_smu_populate_single_scratch_entry( static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -640,7 +640,7 @@ static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -667,10 +667,10 @@ static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smumgr->backend); + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend); uint32_t smc_address; - if (!hwmgr->smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } @@ -745,7 +745,7 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr) if (cz_smu == NULL) return -ENOMEM; - hwmgr->smumgr->backend = cz_smu; + hwmgr->smu_backend = cz_smu; cz_smu->toc_buffer.data_size = 4096; cz_smu->smu_buffer.data_size = @@ -830,7 +830,7 @@ static int cz_smu_fini(struct pp_hwmgr *hwmgr) if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)hwmgr->smumgr->backend; + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; if (cz_smu) { cgs_free_gpu_mem(hwmgr->device, cz_smu->toc_buffer.handle); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 843ed7a665f65..b1a66b5ada4ac 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -198,7 +198,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -216,7 +216,7 @@ static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; 
SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); @@ -299,7 +299,7 @@ static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -314,7 +314,7 @@ static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -334,7 +334,7 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; @@ -359,7 +359,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -370,7 +370,7 @@ static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -389,7 +389,7 @@ static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 16; i++) @@ -400,7 +400,7 @@ static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -421,7 +421,7 @@ static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) { uint32_t pm_fuse_table_offset; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { @@ -575,7 +575,7 @@ static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int i; /* Index (dpm_table->pcie_speed_table.count) @@ -763,7 +763,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_information *table_info = @@ -989,7 +989,7 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -1341,7 +1341,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1449,7 +1449,7 @@ static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1480,7 +1480,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, volt_with_cks, value; uint16_t clock_freq_u16; - struct fiji_smumgr *smu_data = (struct fiji_smumgr 
*)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1685,7 +1685,7 @@ static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1712,7 +1712,7 @@ static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1788,7 +1788,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -2011,7 +2011,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) */ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; @@ -2122,7 +2122,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) return 0; @@ -2150,7 +2150,7 @@ static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2244,7 +2244,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2276,7 +2276,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2308,7 +2308,7 @@ static int 
fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2361,7 +2361,7 @@ int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; @@ -2464,7 +2464,7 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct fiji_smumgr *smu_data = (struct fiji_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index dfdcff54947ad..eafac957b0f6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -166,7 +166,7 @@ static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr) uint32_t reg, data; const PWR_Command_Table *pvirus = PwrVirusTable; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { @@ -195,7 +195,7 @@ static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr) static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, @@ -255,7 +255,7 @@ static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -296,7 +296,7 @@ static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) static int fiji_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ if (!(smu7_is_smc_ram_running(hwmgr) @@ -375,7 +375,7 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr) if (fiji_priv == NULL) return -ENOMEM; - hwmgr->smumgr->backend = fiji_priv; + hwmgr->smu_backend = fiji_priv; if (smu7_init(hwmgr)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 89d5a272e2368..efb0fc0332741 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -101,7 +101,7 @@ static const struct iceland_pt_defaults defaults_icelandpro = { static void 
iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct cgs_system_info sys_info = {0}; uint32_t dev_id; @@ -130,7 +130,7 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -144,7 +144,7 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); @@ -159,7 +159,7 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; @@ -184,7 +184,7 @@ static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 8; i++) @@ -195,7 +195,7 @@ static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; @@ -214,7 +214,7 @@ static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; @@ -240,7 +240,7 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *vid = smu_data->power_tune_table.VddCVid; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -259,7 +259,7 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -590,7 +590,7 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ @@ -805,7 +805,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); @@ -1207,7 +1207,7 @@ static int iceland_populate_single_memory_level( int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1485,7 +1485,7 @@ static int iceland_populate_memory_timing_parameters( static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; SMU71_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1523,7 +1523,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, { int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1564,7 +1564,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) { - const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smumgr->backend; + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -1606,7 +1606,7 @@ static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1650,7 +1650,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1678,7 +1678,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); result = 
iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); @@ -1696,7 +1696,7 @@ static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t count, level; count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); @@ -1725,7 +1725,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; @@ -1813,7 +1813,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1980,7 +1980,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) */ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2070,7 +2070,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2168,7 +2168,7 @@ uint32_t iceland_get_mac_definition(uint32_t value) int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -2508,7 +2508,7 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table) int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = iceland_get_memory_modile_index(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index d665272993083..fd63d2800d05d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -208,7 +208,7 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr) if (iceland_priv == NULL) return -ENOMEM; - hwmgr->smumgr->backend = iceland_priv; + hwmgr->smu_backend = iceland_priv; if (smu7_init(hwmgr)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 118315186bc3d..d0913a6696fde 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -148,7 +148,7 @@ static uint16_t scale_fan_gain_settings(uint16_t raw_setting) static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -196,7 +196,7 @@ static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmg static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -210,7 +210,7 @@ static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -227,7 +227,7 @@ static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; @@ -252,7 +252,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 16; i++) @@ -263,7 +263,7 @@ static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* TO DO move to hwmgr */ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) @@ -279,7 +279,7 @@ static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -290,7 +290,7 @@ static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -310,7 +310,7 @@ static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -492,7 +492,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; else state->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; @@ -514,7 +514,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int i; @@ -545,7 +545,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, SMU74_Discrete_DpmTable *table) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t i, ref_clk; struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; @@ -595,7 +595,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, uint32_t clock, SMU_SclkSetting *sclk_setting) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct pp_atomctrl_clock_dividers_ai dividers; uint32_t ref_clock; @@ -739,7 +739,7 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -887,7 +887,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -1187,7 +1187,7 @@ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1306,7 +1306,7 @@ static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1337,7 +1337,7 
@@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1420,7 +1420,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint16_t config; config = VR_MERGED_WITH_VDDC; @@ -1464,7 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); int result = 0; @@ -1552,7 +1552,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) */ static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1579,7 +1579,7 @@ static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1596,7 +1596,7 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1640,7 +1640,8 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1868,7 +1869,7 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) @@ -1898,7 +1899,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) */ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2006,7 +2007,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2038,7 +2039,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2070,7 +2071,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2098,7 +2099,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; @@ -2136,7 +2137,7 @@ int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2241,7 +2242,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) */ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t tmp; int result; @@ -2321,7 +2322,7 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = 
smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 55ba76202aa34..884ba2ca5399a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -67,7 +67,7 @@ static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr) uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { @@ -96,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr) static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { @@ -174,7 +174,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) static int polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -310,7 +310,7 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) static int polaris10_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ @@ -371,7 +371,7 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr) if (smu_data == NULL) return -ENOMEM; - hwmgr->smumgr->backend = smu_data; + hwmgr->smu_backend = smu_data; if (smu7_init(hwmgr)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index 54d3052dd1571..f9afe88569d1b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c @@ -159,7 +159,7 @@ int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(hwmgr->smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -192,7 +192,7 @@ int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(hwmgr->smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -287,7 +287,7 @@ static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr) static int rv_smu_fini(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv = - (struct rv_smumgr *)(hwmgr->smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); if (priv) { rv_smc_disable_sdma(hwmgr); @@ -296,8 +296,8 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr) 
priv->smu_tables.entry[WMTABLE].handle); cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[CLOCKTABLE].handle); - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; @@ -327,7 +327,7 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) if (!priv) return -ENOMEM; - hwmgr->smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for watermarks table */ smu_allocate_memory(hwmgr->device, @@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for wmtable.", - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[WMTABLE].version = 0x01; @@ -367,8 +367,8 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) "[rv_smu_init] Out of memory for CLOCKTABLE.", cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[CLOCKTABLE].version = 0x01; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index cd283e5af68ca..412cf6f74f670 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -391,12 +391,12 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; - if (!hwmgr->smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } @@ -483,7 +483,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; @@ -497,7 +497,7 @@ int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) int smu7_reload_firmware(struct pp_hwmgr *hwmgr) { - return hwmgr->smumgr->smumgr_funcs->start_smu(hwmgr); + return hwmgr->smumgr_funcs->start_smu(hwmgr); } static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit) @@ -523,7 +523,7 @@ static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct cgs_firmware_info info = {0}; @@ -534,7 +534,7 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr) cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - hwmgr->smumgr->is_kicker = info.is_kicker; + hwmgr->is_kicker = info.is_kicker; result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); @@ -548,7 +548,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) uint64_t mc_addr = 0; /* Allocate memory for backend private data */ - smu_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); smu_data->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; @@ -568,7 +568,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != smu_data->header), "Out of memory.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->header_buffer.handle); return -EINVAL); @@ -591,7 +591,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != internal_buf), "Out of memory.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); @@ -607,8 +607,8 @@ int smu7_init(struct pp_hwmgr *hwmgr) int smu7_smu_fini(struct pp_hwmgr *hwmgr) { - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index a58346e78c35d..d3c12e0ca4647 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -27,7 +27,6 @@ #include #include #include -#include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -46,89 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); -int smum_early_init(struct pp_instance *handle) -{ - struct pp_smumgr *smumgr; - - if (handle == NULL) - return -EINVAL; - - smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL); - if (smumgr == NULL) - return -ENOMEM; - - smumgr->device = handle->device; - smumgr->chip_family = handle->chip_family; - smumgr->chip_id = handle->chip_id; - smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; - smumgr->reload_fw = 1; - handle->smu_mgr = smumgr; - - switch 
(smumgr->chip_family) { - case AMDGPU_FAMILY_CI: - smumgr->smumgr_funcs = &ci_smu_funcs; - break; - case AMDGPU_FAMILY_CZ: - smumgr->smumgr_funcs = &cz_smu_funcs; - break; - case AMDGPU_FAMILY_VI: - switch (smumgr->chip_id) { - case CHIP_TOPAZ: - smumgr->smumgr_funcs = &iceland_smu_funcs; - break; - case CHIP_TONGA: - smumgr->smumgr_funcs = &tonga_smu_funcs; - break; - case CHIP_FIJI: - smumgr->smumgr_funcs = &fiji_smu_funcs; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - case CHIP_POLARIS12: - smumgr->smumgr_funcs = &polaris10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_AI: - switch (smumgr->chip_id) { - case CHIP_VEGA10: - smumgr->smumgr_funcs = &vega10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_RV: - switch (smumgr->chip_id) { - case CHIP_RAVEN: - smumgr->smumgr_funcs = &rv_smu_funcs; - break; - default: - return -EINVAL; - } - break; - default: - kfree(smumgr); - return -EINVAL; - } - - return 0; -} - int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable) - return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable) + return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr); return 0; } int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table) - return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table) + return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); return 0; } @@ -136,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold) - return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr); + if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold) + return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr); return 0; } @@ -145,74 +73,74 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table) - return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type); + if (NULL != hwmgr->smumgr_funcs->update_smc_table) + return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type); return 0; } uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member) { - if (NULL != hwmgr->smumgr->smumgr_funcs->get_offsetof) - return hwmgr->smumgr->smumgr_funcs->get_offsetof(type, member); + if (NULL != hwmgr->smumgr_funcs->get_offsetof) + return hwmgr->smumgr_funcs->get_offsetof(type, member); return 0; } int smum_process_firmware_header(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header) - return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr); + if (NULL != hwmgr->smumgr_funcs->process_firmware_header) + return hwmgr->smumgr_funcs->process_firmware_header(hwmgr); return 0; } int smum_get_argument(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->get_argument) - return hwmgr->smumgr->smumgr_funcs->get_argument(hwmgr); + if (NULL != hwmgr->smumgr_funcs->get_argument) + return hwmgr->smumgr_funcs->get_argument(hwmgr); return 0; } uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) { - if (NULL != hwmgr->smumgr->smumgr_funcs->get_mac_definition) - return 
hwmgr->smumgr->smumgr_funcs->get_mac_definition(value); + if (NULL != hwmgr->smumgr_funcs->get_mac_definition) + return hwmgr->smumgr_funcs->get_mac_definition(value); return 0; } int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table) { - if (NULL != hwmgr->smumgr->smumgr_funcs->download_pptable_settings) - return hwmgr->smumgr->smumgr_funcs->download_pptable_settings(hwmgr, + if (NULL != hwmgr->smumgr_funcs->download_pptable_settings) + return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr, table); return 0; } int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->upload_pptable_settings) - return hwmgr->smumgr->smumgr_funcs->upload_pptable_settings(hwmgr); + if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings) + return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr); return 0; } int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - if (hwmgr == NULL || hwmgr->smumgr->smumgr_funcs->send_msg_to_smc == NULL) + if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL) return -EINVAL; - return hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); + return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); } int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { if (hwmgr == NULL || - hwmgr->smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) + hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) return -EINVAL; - return hwmgr->smumgr->smumgr_funcs->send_msg_to_smc_with_parameter( + return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( hwmgr, msg, parameter); } @@ -356,24 +284,24 @@ int smu_free_memory(void *device, void *handle) int smum_init_smc_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table) - return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->init_smc_table) + return hwmgr->smumgr_funcs->init_smc_table(hwmgr); return 0; } int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels) + return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); return 0; } int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels) + return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr); return 0; } @@ -381,16 +309,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) /*this interface is needed by island ci/vi */ int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table) - return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table) + return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); return 0; } bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running) - return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr); + if (NULL != hwmgr->smumgr_funcs->is_dpm_running) + return hwmgr->smumgr_funcs->is_dpm_running(hwmgr); return true; } @@ -398,8 +326,8 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) int 
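With the intermediate pp_smumgr object gone, the smumgr.c wrappers above all reduce to the same dispatch shape: guard the per-ASIC callback in hwmgr->smumgr_funcs and fall back to a harmless default. One converted wrapper, shown in isolation as a sketch (only the comment is added):

int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	/* Guard the per-ASIC callback; a missing hook is treated as a no-op. */
	if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
		return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);

	return 0;
}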
smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { - if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels( + if (hwmgr->smumgr_funcs->populate_requested_graphic_levels) + return hwmgr->smumgr_funcs->populate_requested_graphic_levels( hwmgr, request); return 0; @@ -407,8 +335,8 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { - if (hwmgr->smumgr->smumgr_funcs->is_hw_avfs_present) - return hwmgr->smumgr->smumgr_funcs->is_hw_avfs_present(hwmgr); + if (hwmgr->smumgr_funcs->is_hw_avfs_present) + return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr); return false; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 68e1e19b971b9..1f720ccdaf999 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -406,7 +406,7 @@ static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_ { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ @@ -598,7 +598,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; @@ -1002,7 +1002,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1090,7 +1090,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, { int result = 0; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct pp_atomctrl_clock_dividers_vi dividers; @@ -1454,7 +1454,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; SMU72_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1492,7 +1492,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1543,7 
+1543,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) volt_with_cks, value; uint16_t clock_freq_u16; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1784,7 +1784,7 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, */ static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1814,7 +1814,7 @@ static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = @@ -1861,7 +1861,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -1876,7 +1876,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1897,7 +1897,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; @@ -1919,7 +1919,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -1930,7 +1930,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -1949,7 +1949,7 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 16; i++) @@ -1961,7 +1961,7 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -1982,7 +1982,7 @@ static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -2051,7 +2051,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) { - const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smumgr->backend; + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -2097,7 +2097,7 @@ static int tonga_convert_mc_reg_table_entry_to_smc( SMU72_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -2141,7 +2141,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -2172,7 +2172,7 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); @@ -2191,7 +2191,7 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2207,7 +2207,7 @@ static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -2253,7 +2253,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - 
(struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2507,7 +2507,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2611,7 +2611,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2714,7 +2714,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2748,7 +2748,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2778,7 +2778,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; smu_data->smc_state_table.SamuBootLevel = 0; @@ -2830,7 +2830,7 @@ int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -3156,7 +3156,7 @@ static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = tonga_get_memory_modile_index(hwmgr); @@ -3239,7 +3239,7 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct tonga_smumgr *smu_data = (struct tonga_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index c10e6f89dbae2..105cb2a3ff6d9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -176,7 +176,7 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr) if (tonga_priv == NULL) return -ENOMEM; - hwmgr->smumgr->backend = tonga_priv; + hwmgr->smu_backend = tonga_priv; if (smu7_init(hwmgr)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index c63127058090c..4cb5d3460fef0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -224,7 +224,7 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(hwmgr->smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -262,7 +262,7 @@ int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(hwmgr->smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -339,7 +339,7 @@ int vega10_get_smc_features(struct pp_hwmgr *hwmgr, int vega10_set_tools_address(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(hwmgr->smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { @@ -412,7 +412,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) if (!priv) return -ENOMEM; - hwmgr->smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for pptable */ smu_allocate_memory(hwmgr->device, @@ -425,7 +425,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for pptable.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -451,7 +451,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for wmtable.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); cgs_free_gpu_mem(hwmgr->device, @@ -479,7 +479,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs table.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); cgs_free_gpu_mem(hwmgr->device, @@ -532,7 +532,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs fuse table.", - kfree(hwmgr->smumgr->backend); + kfree(hwmgr->smu_backend); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); cgs_free_gpu_mem(hwmgr->device, @@ -561,7 +561,7 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr) static int vega10_smu_fini(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(hwmgr->smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv) { cgs_free_gpu_mem(hwmgr->device, @@ -575,8 +575,8 @@ 
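The vega10_smu_init error paths above rely on the PP_ASSERT_WITH_CODE idiom: when the condition evaluates false, the message is logged and the trailing statements run as cleanup, which is why each later allocation unwinds everything allocated before it. The pptable case from the hunk above, pulled out as an isolated sketch:

/* If kaddr is NULL: log the message, free the backend, release the GPU
 * memory handle and bail out with -EINVAL.
 */
PP_ASSERT_WITH_CODE(kaddr,
		"[vega10_smu_init] Out of memory for pptable.",
		kfree(hwmgr->smu_backend);
		cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle);
		return -EINVAL);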
static int vega10_smu_fini(struct pp_hwmgr *hwmgr) (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); - kfree(hwmgr->smumgr->backend); - hwmgr->smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; } From be49be4085d977af566e8d2f9c52ecc1f31b59ad Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 16:49:29 +0800 Subject: [PATCH 216/232] drm/amd/powerplay: use SMU_IND_INDEX/DATA_11 pair in VFPF macros to support virtualization Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 4 ++-- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 8bdffaa14b431..54b151b03aa85 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -195,12 +195,12 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ port, index, value, mask) \ smum_wait_on_indirect_register(hwmgr, \ - mm##port##_INDEX_0, index, value, mask) + mm##port##_INDEX_11, index, value, mask) #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ port, index, value, mask) \ smum_wait_for_indirect_register_unequal(hwmgr, \ - mm##port##_INDEX_0, index, value, mask) + mm##port##_INDEX_11, index, value, mask) #define SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index c2fc237a136a1..ccd65819722b2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -2345,7 +2345,7 @@ static int ci_upload_firmware(struct pp_hwmgr *hwmgr) pr_info("smc is running, no need to load smc firmware\n"); return 0; } - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 1); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); From d92cb1629bcc8cdf4d616f144ced399723816ba3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 19:22:01 +0800 Subject: [PATCH 217/232] drm/amd/powerplay: add new helper functions in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 42 +++++++++++++++++-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 10 ++++- .../gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 2 +- .../drm/amd/powerplay/smumgr/smu7_smumgr.c | 3 +- .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 2 +- 5 files changed, 50 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 9c1479dcf79ce..73969f35846ce 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -451,7 +451,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, * reached the given value.The indirect space is described by giving * the memory-mapped index of the indirect index register. 
*/ -void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, +int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, @@ -459,14 +459,50 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, { if (hwmgr == NULL || hwmgr->device == NULL) { pr_err("Invalid Hardware Manager!"); - return; + return -EINVAL; } cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); } +int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, + uint32_t value, uint32_t mask) +{ + uint32_t i; + uint32_t cur_value; + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + + for (i = 0; i < hwmgr->usec_timeout; i++) { + cur_value = cgs_read_register(hwmgr->device, + index); + if ((cur_value & mask) != (value & mask)) + break; + udelay(1); + } + + /* timeout means wrong logic */ + if (i == hwmgr->usec_timeout) + return -ETIME; + return 0; +} + +int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t indirect_port, + uint32_t index, + uint32_t value, + uint32_t mask) +{ + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + + cgs_write_register(hwmgr->device, indirect_port, index); + return phm_wait_for_register_unequal(hwmgr, indirect_port + 1, + value, mask); +} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 859cca496b446..1c605f966b5fc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -792,12 +792,19 @@ extern int hwmgr_handle_task(struct pp_instance *handle, extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, +extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask); +extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, + uint32_t index, + uint32_t value, uint32_t mask); +extern int phm_wait_for_indirect_register_unequal( + struct pp_hwmgr *hwmgr, + uint32_t indirect_port, uint32_t index, + uint32_t value, uint32_t mask); extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); @@ -882,5 +889,4 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index f9afe88569d1b..b98ade676d128 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c @@ -79,7 +79,7 @@ static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr) reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(hwmgr, reg, + phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); return cgs_read_register(hwmgr->device, reg); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 412cf6f74f670..bb26906edb86b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ 
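The helpers added above poll a register (directly or through an indirect index port) until its masked value stops matching, returning 0 on success and -ETIME once hwmgr->usec_timeout microseconds have elapsed, so callers can finally propagate the failure instead of waiting silently. A hedged usage sketch; the register and field names are borrowed from the existing SMC response handling elsewhere in this series:

/* Poll until SMC_RESP leaves 0; this is what the field-level macros
 * introduced later in the series expand to.
 */
int ret = phm_wait_for_register_unequal(hwmgr, mmSMC_RESP_0,
		0 << PHM_FIELD_SHIFT(SMC_RESP_0, SMC_RESP),
		PHM_FIELD_MASK(SMC_RESP_0, SMC_RESP));
if (ret == -ETIME)
	pr_err("SMC did not respond in time\n");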
b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -487,11 +487,10 @@ int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; - ret = smum_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, + ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), fw_mask, fw_mask); - return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 4cb5d3460fef0..2f979fb868248 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -90,7 +90,7 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(hwmgr, reg, + phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); return cgs_read_register(hwmgr->device, reg); From b05720cbf6458450700d1c3e91d2b2620b4f6295 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 19:28:29 +0800 Subject: [PATCH 218/232] drm/amd/powerplay: move SMUM_WAIT_INDIRECT_FIELD_UNEQUAL to hwmgr.h the macro is not relevent to SMU, so move to hwmgr.h and rename to PHM_WAIT_INDIRECT_FIELD_UNEQUAL Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 13 +++++++++++++ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 11 +---------- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2 +- .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 2 +- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 1c605f966b5fc..277d2604e32e7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -889,4 +889,17 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, \ + mm##port##_INDEX, index, value, mask) + +#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field) ) + + #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 54b151b03aa85..c64abd57f9080 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -117,6 +117,7 @@ extern void smum_wait_for_indirect_register_unequal( uint32_t indirect_port, uint32_t index, uint32_t value, uint32_t mask); + extern int smu_allocate_memory(void *device, uint32_t size, enum cgs_gpu_mem_type type, uint32_t byte_align, uint64_t *mc_addr, @@ -242,15 +243,5 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) -#define 
SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(hwmgr, \ - mm##port##_INDEX, index, value, mask) - -#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field) ) #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index eafac957b0f6f..d40f4a3d5e287 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -63,7 +63,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); */ SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index fd63d2800d05d..3a134eae52924 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -137,7 +137,7 @@ static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr) } /* wait for smc boot up */ - SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* clear firmware interrupt enable flag */ From 554d95da398b6b998f8801daf0d0ace5caab34ff Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:00:50 +0800 Subject: [PATCH 219/232] drm/amd/powerplay: move SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL to hwmgr.h the macro is not relevant to SMU, so move to hwmgr.h and rename to PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 13 +++++++++++++ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 13 ++----------- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 4 ++-- .../gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6 +++--- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 4 ++-- 5 files changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 277d2604e32e7..85a2df2fbaa95 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -902,4 +902,17 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t PHM_FIELD_MASK(reg, field) ) +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + port, index, value, mask) \ + phm_wait_for_indirect_register_unequal(hwmgr, \ + mm##port##_INDEX_11, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + #endif /* _HWMGR_H_ */ diff --git 
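The renamed PHM_WAIT_INDIRECT_FIELD_UNEQUAL macro is only a convenience layer over the phm_wait_for_indirect_register_unequal() helper introduced earlier in the series; tracing the converted iceland call above makes the indirection explicit (a manual expansion, not compiler output):

/*
 * PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0)
 *   -> PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS,
 *              0 << PHM_FIELD_SHIFT(RCU_UC_EVENTS, boot_seq_done),
 *              PHM_FIELD_MASK(RCU_UC_EVENTS, boot_seq_done))
 *   -> phm_wait_for_indirect_register_unequal(hwmgr, mmSMC_IND_INDEX,
 *              ixRCU_UC_EVENTS, value, mask)
 */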
a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index c64abd57f9080..125fa3e812b33 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -198,17 +198,13 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); smum_wait_on_indirect_register(hwmgr, \ mm##port##_INDEX_11, index, value, mask) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ - port, index, value, mask) \ - smum_wait_for_indirect_register_unequal(hwmgr, \ - mm##port##_INDEX_11, index, value, mask) + #define SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + /*Operations on named fields.*/ @@ -238,10 +234,5 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field)) -#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index d40f4a3d5e287..762fe163e8606 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -104,7 +104,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ @@ -126,7 +126,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 884ba2ca5399a..3cc946dee3d1e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -220,7 +220,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ + /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ /* Assert reset */ SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -250,7 +250,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) /* Wait done bit to be set */ /* Check pass/failed indicator */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) @@ -275,7 +275,7 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) int result = 0; /* wait for smc boot up */ - 
SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 105cb2a3ff6d9..a0e0f5efb6fb3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -78,7 +78,7 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) smu7_send_msg_to_smc_offset(hwmgr); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ @@ -101,7 +101,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /*Clear firmware interrupt enable flag*/ From 57d13f794dcf918d9710923d0c64edb14e370271 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:04:33 +0800 Subject: [PATCH 220/232] drm/amd/powerplay: move PHM_WAIT_VFPF_INDIRECT_FIELD to hwmgr.h the macro is not relevant to SMU, so move to hwmgr.h and rename to PHM_WAIT_VFPF_INDIRECT_FIELD Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 14 ++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 14 +------------- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 6 +++--- .../drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6 +++--- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 2 +- .../gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 6 +++--- 6 files changed, 25 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 85a2df2fbaa95..2ac8d7b59e613 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -915,4 +915,18 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t (fieldval) << PHM_FIELD_SHIFT(reg, field), \ PHM_FIELD_MASK(reg, field)) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ + port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, \ + mm##port##_INDEX_11, index, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) + +#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 125fa3e812b33..099758d405a6f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -193,16 +193,7 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ - port, index, value, mask) \ - smum_wait_on_indirect_register(hwmgr, \ 
- mm##port##_INDEX_11, index, value, mask) - - - -#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) @@ -229,10 +220,7 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); reg, field, fieldval)) -#define SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 762fe163e8606..75ed7c3ea9900 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -96,7 +96,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); @@ -115,7 +115,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; @@ -153,7 +153,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 3cc946dee3d1e..fd4ccd0969851 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -241,7 +241,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /* Call Test SMU message with 0x20000 offset to trigger SMU start */ @@ -265,7 +265,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } @@ -301,7 +301,7 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index bb26906edb86b..a360c3ce5da2b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -235,7 +235,7 @@ int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, 
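The VF/PF-safe variants moved here differ from the plain indirect wait macros only in the index register they program: since the earlier switch to the SMC_IND_INDEX_11/DATA_11 pair they remain usable when the driver runs as a virtual function. A manual expansion of the firmware-flags wait used by the converted fiji/polaris call sites above:

/*
 * PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1)
 *   -> phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, ixFIRMWARE_FLAGS,
 *              1 << PHM_FIELD_SHIFT(FIRMWARE_FLAGS, INTERRUPTS_ENABLED),
 *              PHM_FIELD_MASK(FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
 */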
SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index a0e0f5efb6fb3..6a9b3cf3fdaa4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -69,7 +69,7 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /** @@ -89,7 +89,7 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; @@ -129,7 +129,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; From 538fdf1fe7bea0e3a1cfde4ebf6ded9f397a1914 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:29:23 +0800 Subject: [PATCH 221/232] drm/amd/powerplay: move macros to hwmgr.h the macro is not relevant to SMU, so rename SMU_WAIT_FIELD_UNEQUAL to PHM_WAIT_FIELD_UNEQUAL and move to hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 14 ++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 14 -------------- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 4 ++-- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 8 ++++---- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 2ac8d7b59e613..126b44d47a99b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -929,4 +929,18 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t (fieldval) << PHM_FIELD_SHIFT(reg, field), \ PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + index, value, mask) \ + phm_wait_for_register_unequal(hwmgr, \ + index, value, mask) + +#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ + PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ + mm##reg, value, mask) + +#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ + PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \ + (fieldval) << PHM_FIELD_SHIFT(reg, field), \ + PHM_FIELD_MASK(reg, field)) + #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 099758d405a6f..75ba6eb246a6f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -163,20 +163,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ SMUM_FIELD_MASK(reg, field) ) -#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ - index, value, mask) \ - smum_wait_for_register_unequal(hwmgr, \ - 
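PHM_WAIT_FIELD_UNEQUAL, the last macro moved in this group, waits on a directly mapped register; the SMC message handshake converted in the hunks that follow is a typical user. A minimal sketch of that handshake, assuming the smu7/ci register layout shown in those hunks; the wrapper name is illustrative, not an existing function:

static int send_msg_and_wait(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	/* Post the message to the SMC... */
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
	/* ...spin until SMC_RESP leaves 0, i.e. the SMC acknowledged it... */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
	/* ...and hand back the raw response code for the caller to check. */
	return SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
}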
index, value, mask) - -#define SMUM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ - SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \ - mm##reg, value, mask) - -#define SMUM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ - SMUM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \ - (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field)) - #define SMUM_GET_FIELD(value, reg, field) \ (((value) & SMUM_FIELD_MASK(reg, field)) \ >> SMUM_FIELD_SHIFT(reg, field)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index ccd65819722b2..ea9ea3fe1b765 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -217,7 +217,7 @@ int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 8aee9c817ff98..9628e0379fd81 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -68,7 +68,7 @@ static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = SMUM_WAIT_FIELD_UNEQUAL(hwmgr, + result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); @@ -90,7 +90,7 @@ static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) if (result != 0) return result; - return SMUM_WAIT_FIELD_UNEQUAL(hwmgr, + return PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 75ed7c3ea9900..0b7cb3b0510eb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -101,7 +101,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); /* Wait for done bit to be set */ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index a360c3ce5da2b..0f23e238c5f1b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -170,7 +170,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) return -EINVAL; - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); @@ -179,7 +179,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); @@ -202,7 +202,7 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui return 
-EINVAL; } - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); @@ -222,7 +222,7 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); if (1 != SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) pr_info("Failed to send Message.\n"); From fbabae46964fec421c717d27b57d4383e8ae4b64 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:17:08 +0800 Subject: [PATCH 222/232] drm/amd/powerplay: delete SMU_WRITE_INDIRECT_FIELD the macro is as same as PHM_WRITE_INDIRECT_FIELD Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 15 --------------- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 4 ++-- .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 8 ++++---- 3 files changed, 6 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 75ba6eb246a6f..10d745737e056 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -179,11 +179,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) - - - - - /*Operations on named fields.*/ #define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \ @@ -199,14 +194,4 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) - -#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ - cgs_write_ind_register(device, port, ix##reg, \ - SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field, fieldval)) - - - - - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index ea9ea3fe1b765..75ee6233c624c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -1934,9 +1934,9 @@ static int ci_start_smc(struct pp_hwmgr *hwmgr) ci_program_jump_on_start(hwmgr); /* enable smc clock */ - SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 3a134eae52924..fe57335152d52 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -41,7 +41,7 @@ static int iceland_start_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); return 0; @@ -49,7 +49,7 @@ static int iceland_start_smc(struct pp_hwmgr *hwmgr) static void iceland_reset_smc(struct pp_hwmgr *hwmgr) { - 
SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); } @@ -57,14 +57,14 @@ static void iceland_reset_smc(struct pp_hwmgr *hwmgr) static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); } static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); } From a9eca3a685b9fc3c9910eca4783ef07a2345b9e0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:18:16 +0800 Subject: [PATCH 223/232] drm/amd/powerplay: delete SMUM_WRITE_FIELD the macro is as same as PHM_WRITE_FIELD Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 3 --- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 6 +++--- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 4 ++-- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 +++--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 10d745737e056..4433e0024cf9e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -185,9 +185,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) -#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \ - cgs_write_register(device, mm##reg, \ - SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval)) #define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \ cgs_write_ind_register(device, port, ix##reg, \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index 75ee6233c624c..5c531eb96648a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -104,7 +104,7 @@ static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr, } cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); return 0; } @@ -2322,14 +2322,14 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) } cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); for (; byte_count >= 4; byte_count -= 4) { data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; } - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); if (0 != byte_count) { pr_err("SMC size must be dividable by 4\n"); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index fe57335152d52..b729a393e5887 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -97,7 
+97,7 @@ static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); while (byte_count >= 4) { data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; @@ -106,7 +106,7 @@ static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, byte_count -= 4; } - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 0f23e238c5f1b..af5c4d30cf3e7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -40,7 +40,7 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ return 0; } @@ -506,12 +506,12 @@ static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); for (; byte_count >= 4; byte_count -= 4) cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++); - SMUM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); From 37192704d9f55b0a64248c91a251fc6665f88045 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:19:58 +0800 Subject: [PATCH 224/232] drm/amd/powerplay: delete SMUM_WRITE_VFPF_INDIRECT_FIELD repeated defining in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 6 ------ .../gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 14 +++++++------- .../amd/powerplay/smumgr/polaris10_smumgr.c | 18 +++++++++--------- .../drm/amd/powerplay/smumgr/tonga_smumgr.c | 14 +++++++------- 4 files changed, 23 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 4433e0024cf9e..b1b2104453c82 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -185,10 +185,4 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) - -#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, 
fieldval) \ - cgs_write_ind_register(device, port, ix##reg, \ - SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field, fieldval)) - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 0b7cb3b0510eb..ee89fd7c83429 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -66,7 +66,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); result = smu7_upload_smu_firmware_image(hwmgr); @@ -77,11 +77,11 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for ROM firmware to initialize interrupt hendler */ @@ -89,7 +89,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ @@ -134,7 +134,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) ixFIRMWARE_FLAGS, 0); /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); result = smu7_upload_smu_firmware_image(hwmgr); @@ -145,11 +145,11 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) smu7_program_jump_on_start(hwmgr); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index fd4ccd0969851..eefa13ba4eaf2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -223,7 +223,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); result = smu7_upload_smu_firmware_image(hwmgr); @@ -233,11 +233,11 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) /* Clear status */ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 
ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); @@ -258,10 +258,10 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ @@ -278,11 +278,11 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ - /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ + /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); @@ -293,10 +293,10 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) /* Set smc instruct start point at 0x0 */ smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 6a9b3cf3fdaa4..7ffcadaa1a537 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -42,7 +42,7 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) int result; /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); result = smu7_upload_smu_firmware_image(hwmgr); @@ -54,15 +54,15 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) ixSMU_STATUS, 0); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ @@ -109,7 +109,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr 
*hwmgr) ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); result = smu7_upload_smu_firmware_image(hwmgr); @@ -121,11 +121,11 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /*De-assert reset*/ - SMUM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ From f0f6e3752a8138342038c89d25856ce28a36160b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:21:25 +0800 Subject: [PATCH 225/232] drm/amd/powerplay: delete SMUM_READ_VFPF_INDIRECT_FIELD repeated defining in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 5 ----- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 4 ++-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6 +++--- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 4 ++-- 6 files changed, 9 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index b1b2104453c82..0bd4476106bf0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -179,10 +179,5 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) -/*Operations on named fields.*/ - -#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \ - SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field) #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index 5c531eb96648a..445829d329d03 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -188,7 +188,7 @@ static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr) bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index ee89fd7c83429..5b25e067b2f1d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -108,7 +108,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); @@ -304,7 +304,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) fiji_avfs_event_mgr(hwmgr, false); /* Check if SMU is running in protected mode */ - if (0 == 
SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { result = fiji_start_smu_in_non_protection_mode(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index eefa13ba4eaf2..22b8ecbf7fce8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -252,7 +252,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); @@ -316,8 +316,8 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr) /* Only start SMC if SMC RAM is not running */ if (!smu7_is_smc_ram_running(hwmgr)) { SMU_VFT_INTACT = false; - smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index af5c4d30cf3e7..89e2464860f07 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -158,7 +158,7 @@ int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr) bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 7ffcadaa1a537..d22cf218cf187 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -82,7 +82,7 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { pr_err("SMU Firmware start failed\n"); return -EINVAL; @@ -143,7 +143,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr) if (!(smu7_is_smc_ram_running(hwmgr) || cgs_is_virtualization_enabled(hwmgr->device))) { /*Check if SMU is running in protected mode*/ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { result = tonga_start_in_non_protection_mode(hwmgr); if (result) From 
95175869bd309c77f8391c6ea2c2ba440d7f9af7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:24:58 +0800 Subject: [PATCH 226/232] drm/amd/powerplay: delete SMUM_SET_FIELD repeated defining in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 5 ----- drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 12 ++++++------ 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 0bd4476106bf0..b742c22bb69b1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -170,11 +170,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); #define SMUM_READ_FIELD(device, reg, field) \ SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field) -#define SMUM_SET_FIELD(value, reg, field, field_val) \ - (((value) & ~SMUM_FIELD_MASK(reg, field)) | \ - (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ - SMUM_FIELD_SHIFT(reg, field)))) - #define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 9628e0379fd81..efdc1cb7ab634 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -191,17 +191,17 @@ static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) /* Disable MEC parsing/prefetching */ tmp = cgs_read_register(hwmgr->device, mmCP_MEC_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp); tmp = cgs_read_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); reg_data = smu_lower_32_bits(info.mc_addr) & From 515113f5e5835ee3ecb00d3da292ca67b5e7a972 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:26:03 +0800 Subject: [PATCH 227/232] drm/amd/powerplay: delete SMUM_READ_FIELD repeated defining in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 2 -- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 +++--- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index b742c22bb69b1..ebe988b3ecdbd 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -167,8 +167,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); (((value) & SMUM_FIELD_MASK(reg, field)) \ >> 
SMUM_FIELD_SHIFT(reg, field)) -#define SMUM_READ_FIELD(device, reg, field) \ - SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field) #define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index 445829d329d03..bad9bf2bcb5a1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -219,7 +219,7 @@ int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d\n", msg, ret); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index d0913a6696fde..c92ea38d2e15e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -866,7 +866,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && - (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)) mem_level->StutterEnable = true; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 89e2464860f07..2ae05bbdb9740 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -172,7 +172,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); @@ -181,7 +181,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); @@ -224,7 +224,7 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) + if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) pr_info("Failed to send Message.\n"); return 0; From 0041e6007ecb45f4d48f5a445ace73c9196a5f82 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:31:07 +0800 Subject: [PATCH 228/232] drm/amd/powerplay: delete SMUM_WAIT_INDIRECT_FIELD repeated defining in hwmgr.h use PHM_WAIT_INDIRECT_FIELD instand. 
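For reference, the two macro families bottom out in the same indirect-register poll, so the substitution is mechanical. As a rough illustration only (the register and field names below are the usual SMC_IND ones and are not taken from this patch), a call such as

	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				INTERRUPTS_ENABLED, 1);

expands approximately to

	phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX,
				      ixFIRMWARE_FLAGS,
				      1 << FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT,
				      FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK);

which is the same wait that the deleted SMUM_WAIT_INDIRECT_FIELD produced through smum_wait_on_indirect_register().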
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 11 ----------- drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 2 +- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index ebe988b3ecdbd..cc67d225995fb 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -151,17 +151,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK -#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \ - port, index, value, mask) \ - smum_wait_on_indirect_register(hwmgr, \ - mm##port##_INDEX, index, value, mask) - -#define SMUM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define SMUM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - SMUM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ - SMUM_FIELD_MASK(reg, field) ) #define SMUM_GET_FIELD(value, reg, field) \ (((value) & SMUM_FIELD_MASK(reg, field)) \ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c index bad9bf2bcb5a1..9ee14315dce79 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c @@ -1938,7 +1938,7 @@ static int ci_start_smc(struct pp_hwmgr *hwmgr) PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index b729a393e5887..78aa1122eacce 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -80,7 +80,7 @@ static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr) /* de-assert reset */ iceland_start_smc(hwmgr); - SMUM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; From 63196fe79b28e2b161a6d951877bdd0451b1f1a3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:33:55 +0800 Subject: [PATCH 229/232] drm/amd/powerplay: delete SMUM_FIELD_MASK repeated defining in hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index efdc1cb7ab634..78ab0556e48f7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -205,11 +205,11 @@ static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); reg_data = smu_lower_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); + PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data); reg_data = smu_upper_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); + 
PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data); return 0; From aec8d5cc28b32b02e09c92c422f4a4ed9f53ff74 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 20 Sep 2017 17:34:15 +0800 Subject: [PATCH 230/232] drm/amd/powerplay: delete dead code in smumgr Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 32 ------- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 87 ------------------- 2 files changed, 119 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index cc67d225995fb..7c9aba81cd6a2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -101,30 +101,12 @@ extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -extern int smum_wait_on_register(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern int smum_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern int smum_wait_on_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index, - uint32_t value, uint32_t mask); - - -extern void smum_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index, - uint32_t value, uint32_t mask); - - extern int smu_allocate_memory(void *device, uint32_t size, enum cgs_gpu_mem_type type, uint32_t byte_align, uint64_t *mc_addr, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); -extern int vega10_smum_init(struct pp_hwmgr *hwmgr); extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); @@ -147,19 +129,5 @@ extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); -#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT - -#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK - - -#define SMUM_GET_FIELD(value, reg, field) \ - (((value) & SMUM_FIELD_MASK(reg, field)) \ - >> SMUM_FIELD_SHIFT(reg, field)) - - -#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ - SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ - reg, field) - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index d3c12e0ca4647..8673884565300 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -144,93 +144,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, hwmgr, msg, parameter); } -/* - * Returns once the part of the register indicated by the mask has - * reached the given value. 
- */ -int smum_wait_on_register(struct pp_hwmgr *hwmgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (hwmgr == NULL || hwmgr->device == NULL) - return -EINVAL; - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, index); - if ((cur_value & mask) == (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == hwmgr->usec_timeout) - return -1; - - return 0; -} - -int smum_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (hwmgr == NULL) - return -EINVAL; - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, - index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic */ - if (i == hwmgr->usec_timeout) - return -1; - - return 0; -} - - -/* - * Returns once the part of the register indicated by the mask - * has reached the given value.The indirect space is described by - * giving the memory-mapped index of the indirect index register. - */ -int smum_wait_on_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) - return -EINVAL; - - cgs_write_register(hwmgr->device, indirect_port, index); - return smum_wait_on_register(hwmgr, indirect_port + 1, - mask, value); -} - -void smum_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) - return; - cgs_write_register(hwmgr->device, indirect_port, index); - smum_wait_for_register_unequal(hwmgr, indirect_port + 1, - value, mask); -} - int smu_allocate_memory(void *device, uint32_t size, enum cgs_gpu_mem_type type, uint32_t byte_align, uint64_t *mc_addr, From dfced2e4bcbc8d6bd9bad11cc6b4643ba36ed35a Mon Sep 17 00:00:00 2001 From: Samuel Li Date: Tue, 22 Aug 2017 15:25:33 -0400 Subject: [PATCH 231/232] drm/amdgpu: Add gem_prime_mmap support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: drop hdp invalidate/flush. v3: honor pgoff during prime mmap. Add a barrier after cpu access. v4: drop begin/end_cpu_access() for now, revisit later. Signed-off-by: Samuel Li Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 34 +++++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7c43add4e4444..ebfc267467eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -413,6 +413,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj); struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); /* sub-allocation manager, it has to be protected by another lock. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 91e42b60d66b5..4f98960e47f9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -855,6 +855,7 @@ static struct drm_driver kms_driver = { .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_vmap = amdgpu_gem_prime_vmap, .gem_prime_vunmap = amdgpu_gem_prime_vunmap, + .gem_prime_mmap = amdgpu_gem_prime_mmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 7e0826469b5e8..90af8e82b16af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) ttm_bo_kunmap(&bo->dma_buf_vmap); } +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + unsigned asize = amdgpu_bo_size(bo); + int ret; + + if (!vma->vm_file) + return -ENODEV; + + if (adev == NULL) + return -ENODEV; + + /* Check for valid size. */ + if (asize < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || + (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { + return -EPERM; + } + vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT; + + /* prime mmap does not need to check access, so allow here */ + ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data); + if (ret) + return ret; + + ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev); + drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data); + + return ret; +} + struct drm_gem_object * amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, From 6f87a895709eecc1542fe947e349364ad061ac00 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Sep 2017 10:20:27 -0400 Subject: [PATCH 232/232] drm/amdgpu: clarify license in amdgpu_trace_points.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was not clear. The rest of the driver is MIT/X11. Reviewed-by: Christian König Acked-by: Dave Airlie Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_trace_points.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c index 385b7e1d72f9e..9ec96b9e85d16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c @@ -1,4 +1,23 @@ /* Copyright Red Hat Inc 2010. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Author : Dave Airlie */ #include
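
As a usage note for the gem_prime_mmap support added earlier in this series (patch 231): user space reaches amdgpu_gem_prime_mmap() by exporting a GEM handle as a dma-buf and calling mmap() on the resulting fd. The snippet below is only a minimal sketch of that flow, not code from the series — it assumes libdrm's drmPrimeHandleToFD() helper, omits error reporting, and maps from offset 0 of the buffer.

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>	/* drmPrimeHandleToFD() */

	/* Export a GEM handle as a dma-buf fd and map it for CPU access.
	 * The mmap() on the dma-buf fd is what ultimately lands in the
	 * driver's gem_prime_mmap callback.
	 */
	static void *map_prime_buffer(int drm_fd, uint32_t gem_handle, size_t size)
	{
		int dmabuf_fd;
		void *ptr;

		if (drmPrimeHandleToFD(drm_fd, gem_handle,
				       DRM_CLOEXEC | DRM_RDWR, &dmabuf_fd))
			return NULL;

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   dmabuf_fd, 0);
		return ptr == MAP_FAILED ? NULL : ptr;
	}

Note that, per the checks in amdgpu_gem_prime_mmap(), buffers backed by user pages or created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS will still fail such a mapping with -EPERM.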