drm/amdkfd: Avoid name confusion involved in queue unmapping

When unmapping queues from the HW scheduler, there are two possible
actions: reset and preempt. So naming the variables and types after
preempt alone is inappropriate.

Likewise, functions such as destroy_queues_cpsch() actually unmap the
queues from the HW scheduler rather than destroy them, so rename them to
reflect that. Moreover, there is already a function named
destroy_queue_cpsch() which really does destroy a queue, and its name is
so close to destroy_queues_cpsch() that the two are easily confused.

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Yong Zhao authored and Oded Gabbay committed Sep 27, 2017
1 parent 58dcd5b commit 7da2bcf
Showing 3 changed files with 35 additions and 35 deletions.
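
To make the rename concrete, below is a condensed, illustrative sketch of the interface after this commit, distilled from the hunks that follow: the kfd_unmap_queues_filter enum replaces kfd_preempt_type_filter, and unmap_queues_cpsch() (formerly destroy_queues_cpsch()) only translates its static_queues_included flag into a filter for pm_send_unmap_queue(); it does not free any queue objects. Locking, SDMA handling, fence waiting and error paths are omitted, so treat this as a sketch rather than the actual driver code.

/* Condensed sketch only -- see the real hunks below for the full code. */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/* Formerly destroy_queues_cpsch(): it only unmaps (preempts) queues from
 * the HW scheduler; the queue objects themselves stay alive. Compare
 * destroy_queue_cpsch(), which really tears down a single queue.
 * Lock handling and SDMA queues are elided in this sketch.
 */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      bool static_queues_included, bool lock)
{
	enum kfd_unmap_queues_filter filter;

	filter = static_queues_included ?
			KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	/* reset=false: the queues are preempted, not reset */
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
				   filter, 0, false, 0);
}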
32 changes: 16 additions & 16 deletions drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -45,8 +45,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
 
 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
-				bool preempt_static_queues, bool lock);
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+				bool static_queues_included, bool lock);
 
 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
@@ -707,7 +707,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 
 static int stop_cpsch(struct device_queue_manager *dqm)
 {
-	destroy_queues_cpsch(dqm, true, true);
+	unmap_queues_cpsch(dqm, true, true);
 
 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
 	pm_uninit(&dqm->packets);
@@ -750,7 +750,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	mutex_lock(&dqm->lock);
 	/* here we actually preempt the DIQ */
-	destroy_queues_cpsch(dqm, true, false);
+	unmap_queues_cpsch(dqm, true, false);
 	list_del(&kq->list);
 	dqm->queue_count--;
 	qpd->is_debug = false;
@@ -849,19 +849,19 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	return 0;
 }
 
-static int destroy_sdma_queues(struct device_queue_manager *dqm,
+static int unmap_sdma_queues(struct device_queue_manager *dqm,
 				unsigned int sdma_engine)
 {
 	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
 }
 
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
-				bool preempt_static_queues, bool lock)
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+				bool static_queues_included, bool lock)
 {
 	int retval;
-	enum kfd_preempt_type_filter preempt_type;
+	enum kfd_unmap_queues_filter filter;
 	struct kfd_process_device *pdd;
 
 	retval = 0;
@@ -875,16 +875,16 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 			dqm->sdma_queue_count);
 
 	if (dqm->sdma_queue_count > 0) {
-		destroy_sdma_queues(dqm, 0);
-		destroy_sdma_queues(dqm, 1);
+		unmap_sdma_queues(dqm, 0);
+		unmap_sdma_queues(dqm, 1);
 	}
 
-	preempt_type = preempt_static_queues ?
-			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
-			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;
+	filter = static_queues_included ?
+			KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
-			preempt_type, 0, false, 0);
+			filter, 0, false, 0);
 	if (retval)
 		goto out;
 
@@ -916,7 +916,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	if (lock)
 		mutex_lock(&dqm->lock);
 
-	retval = destroy_queues_cpsch(dqm, false, false);
+	retval = unmap_queues_cpsch(dqm, false, false);
 	if (retval) {
 		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
18 changes: 9 additions & 9 deletions drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -476,7 +476,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 }
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
-			enum kfd_preempt_type_filter mode,
+			enum kfd_unmap_queues_filter filter,
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine)
 {
@@ -494,8 +494,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 
 	packet = (struct pm4_mes_unmap_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-	pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
-		mode, reset, type);
+	pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
+		filter, reset, type);
 	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
 					sizeof(struct pm4_mes_unmap_queues));
 	switch (type) {
@@ -521,29 +521,29 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	packet->bitfields2.action =
 			action__mes_unmap_queues__preempt_queues;
 
-	switch (mode) {
-	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
 		packet->bitfields2.num_queues = 1;
 		packet->bitfields3b.doorbell_offset0 = filter_param;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
 		packet->bitfields3a.pasid = filter_param;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__unmap_all_queues;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
 		/* in this case, we do not preempt static queues */
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
 		break;
 	default:
-		WARN(1, "filter %d", mode);
+		WARN(1, "filter %d", filter);
 		retval = -EINVAL;
 		goto err_invalid;
 	}
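
For reference, a caller that wants to pull every queue belonging to one process off the HW scheduler would now use the BY_PASID filter, passing the PASID as filter_param; the switch above routes it to the per-PASID queue_sel and bitfields3a.pasid. The helper below is hypothetical and only illustrates the calling convention of pm_send_unmap_queue() after the rename.

/* Hypothetical illustration, not part of this patch. */
static int unmap_process_queues(struct packet_manager *pm, unsigned int pasid)
{
	/* reset=false requests preemption rather than a queue reset;
	 * the trailing sdma_engine argument only matters for SDMA-type
	 * unmaps, so 0 is passed here.
	 */
	return pm_send_unmap_queue(pm, KFD_QUEUE_TYPE_COMPUTE,
				   KFD_UNMAP_QUEUES_FILTER_BY_PASID,
				   pasid, false, 0);
}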
20 changes: 10 additions & 10 deletions drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -222,22 +222,22 @@ void kfd_chardev_exit(void);
 struct device *kfd_chardev(void);
 
 /**
- * enum kfd_preempt_type_filter
+ * enum kfd_unmap_queues_filter
  *
- * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
+ * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
  *
- * @KFD_PRERMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
  *		running queues list.
  *
- * @KFD_PRERMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to
+ * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to
  *		specific process.
  *
 */
-enum kfd_preempt_type_filter {
-	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
-	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
-	KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
-	KFD_PREEMPT_TYPE_FILTER_BY_PASID
+enum kfd_unmap_queues_filter {
+	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
+	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+	KFD_UNMAP_QUEUES_FILTER_BY_PASID
 };
 
 /**
@@ -700,7 +700,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 				uint32_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
-			enum kfd_preempt_type_filter mode,
+			enum kfd_unmap_queues_filter mode,
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine);
 
