Skip to content

Commit

Permalink
drm/amdkfd: replace kgd_dev in static gfx v8 funcs
Browse files Browse the repository at this point in the history
Static functions in amdgpu_amdkfd_gfx_v8.c now take a struct amdgpu_device pointer directly instead of a struct kgd_dev pointer.

Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
  • Loading branch information
Graham Sider authored and Alex Deucher committed Nov 17, 2021
1 parent 9365fbf commit 1cca608
Showing 1 changed file with 23 additions and 28 deletions.
51 changes: 23 additions & 28 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
Original file line number Diff line number Diff line change
Expand Up @@ -44,38 +44,33 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
return (struct amdgpu_device *)kgd;
}

/*
 * lock_srbm - take the SRBM mutex and select an SRBM register bank.
 *
 * Programs SRBM_GFX_CNTL so that subsequent banked register accesses
 * target the given MEC/pipe/queue/VMID.  The srbm_mutex is left held;
 * the caller must release it via unlock_srbm().
 */
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
static void unlock_srbm(struct amdgpu_device *adev)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);

WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}

/*
 * acquire_queue - select the SRBM bank for a compute queue.
 *
 * Converts the flat pipe_id into a MEC index (MEC0 is reserved for
 * graphics, hence the +1) and a pipe within that MEC, then locks the
 * SRBM with VMID 0.  Must be paired with release_queue().
 */
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t queue_id)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0);
}

/*
 * release_queue - counterpart to acquire_queue(); restores the SRBM
 * selection and releases the srbm_mutex.
 */
static void release_queue(struct amdgpu_device *adev)
{
	unlock_srbm(adev);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
Expand All @@ -86,14 +81,14 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);

lock_srbm(kgd, 0, 0, 0, vmid);
lock_srbm(adev, 0, 0, 0, vmid);

WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);

unlock_srbm(kgd);
unlock_srbm(adev);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
Expand Down Expand Up @@ -132,12 +127,12 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

lock_srbm(kgd, mec, pipe, 0, 0);
lock_srbm(adev, mec, pipe, 0, 0);

WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

unlock_srbm(kgd);
unlock_srbm(adev);

return 0;
}
Expand Down Expand Up @@ -178,7 +173,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,

m = get_mqd(mqd);

acquire_queue(kgd, pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);

/* HIQ is set during driver init period with vmid set to 0*/
if (m->cp_hqd_vmid == 0) {
Expand Down Expand Up @@ -226,16 +221,16 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(kgd);
release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(kgd, pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);

release_queue(kgd);
release_queue(adev);

return 0;
}
Expand All @@ -258,7 +253,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;

acquire_queue(kgd, pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);

DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
Expand All @@ -268,7 +263,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
DUMP_REG(reg);

release_queue(kgd);
release_queue(adev);

WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
Expand Down Expand Up @@ -375,7 +370,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
bool retval = false;
uint32_t low, high;

acquire_queue(kgd, pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
Expand All @@ -385,7 +380,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
release_queue(kgd);
release_queue(adev);
return retval;
}

Expand Down Expand Up @@ -422,7 +417,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;

acquire_queue(kgd, pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);

if (m->cp_hqd_vmid == 0)
WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
Expand Down Expand Up @@ -502,13 +497,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
release_queue(kgd);
release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}

release_queue(kgd);
release_queue(adev);
return 0;
}

Expand Down Expand Up @@ -612,9 +607,9 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

lock_srbm(kgd, 0, 0, 0, vmid);
lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
unlock_srbm(kgd);
unlock_srbm(adev);
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
Expand Down

0 comments on commit 1cca608

Please sign in to comment.