Skip to content

Commit

Permalink
drm/amdgpu: Enable GFX11 SDMA context empty interrupt
Browse files Browse the repository at this point in the history
Enable SDMA queue empty context switching. SDMA context switching due to
quantum programming is no longer done here (as of sdma v6), so rename
sdma_v6_0_ctx_switch_enable to sdma_v6_0_ctxempty_int_enable to reflect
this.

Also program SDMAx_QUEUEx_SCHEDULE_CNTL for context switch due to
quantum in KFD. Set to amdgpu_sdma_phase_quantum (defaults to 32 i.e.
3200us).

Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Reviewed-by: Stanley Yang <Stanley.Yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
  • Loading branch information
Graham Sider authored and Alex Deucher committed Apr 11, 2023
1 parent 00fa403 commit 2748868
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 10 deletions.
28 changes: 18 additions & 10 deletions drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
Original file line number Diff line number Diff line change
Expand Up @@ -403,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
}

/**
* sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch
* sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
* @enable: enable/disable context switching due to queue empty conditions
*
* Halt or unhalt the async dma engines context switch.
* Enable or disable the async dma engines queue empty context switch.
*/
static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{
	u32 reg_offset, cntl;
	int inst;

	/* NOTE(review): original code skips this programming entirely under
	 * SR-IOV — presumably the host controls SDMA0_CNTL in that case;
	 * preserve that guard as an early return.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Toggle the context-empty interrupt bit on every SDMA instance. */
	for (inst = 0; inst < adev->sdma.num_instances; inst++) {
		reg_offset = sdma_v6_0_get_reg_offset(adev, inst, regSDMA0_CNTL);
		cntl = RREG32(reg_offset);
		cntl = REG_SET_FIELD(cntl, SDMA0_CNTL,
				     CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
		WREG32(reg_offset, cntl);
	}
}

/**
Expand Down Expand Up @@ -579,10 +590,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)

ring->sched.ready = true;

if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
sdma_v6_0_ctx_switch_enable(adev, true);
if (amdgpu_sriov_vf(adev))
sdma_v6_0_enable(adev, true);
}

r = amdgpu_ring_test_helper(ring);
if (r) {
Expand Down Expand Up @@ -778,7 +787,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
int r = 0;

if (amdgpu_sriov_vf(adev)) {
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);

/* set RB registers */
Expand All @@ -799,7 +807,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
/* unhalt the MEs */
sdma_v6_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v6_0_ctx_switch_enable(adev, true);
sdma_v6_0_ctxempty_int_enable(adev, true);

/* start the gfx rings and rlc compute queues */
r = sdma_v6_0_gfx_resume(adev);
Expand Down Expand Up @@ -1340,7 +1348,7 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0;
}

sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);

return 0;
Expand Down
4 changes: 4 additions & 0 deletions drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
Original file line number Diff line number Diff line change
Expand Up @@ -357,6 +357,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
Expand Down

0 comments on commit 2748868

Please sign in to comment.