diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 07ac724e3ec90..ee3e04e10dae3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -109,7 +109,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
 
 	is_32bit_user_mode = in_compat_syscall();
 
-	if (is_32bit_user_mode == true) {
+	if (is_32bit_user_mode) {
 		dev_warn(kfd_device,
 			"Process %d (32-bit) failed to open /dev/kfd\n"
 			"32-bit processes are not supported by amdkfd\n",
@@ -131,12 +131,11 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
 					void *data)
 {
 	struct kfd_ioctl_get_version_args *args = data;
-	int err = 0;
 
 	args->major_version = KFD_IOCTL_MAJOR_VERSION;
 	args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
-	return err;
+	return 0;
 }
 
 static int set_queue_properties_from_user(struct queue_properties *q_properties,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4bb7f42237624..f49c551195b3c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -216,7 +216,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 		}
 	}
 
-	if (set == false)
+	if (!set)
 		return -EBUSY;
 
 	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
@@ -354,7 +354,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		return -ENOMEM;
 	}
 
-	if (q->properties.is_active == true)
+	if (q->properties.is_active)
 		prev_active = true;
 
 	/*
@@ -363,9 +363,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	 * and modify counter accordingly
 	 */
 	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
-	if ((q->properties.is_active == true) && (prev_active == false))
+	if ((q->properties.is_active) && (!prev_active))
 		dqm->queue_count++;
-	else if ((q->properties.is_active == false) && (prev_active == true))
+	else if ((!q->properties.is_active) && (prev_active))
 		dqm->queue_count--;
 
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -954,7 +954,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 
 	if (lock)
 		mutex_lock(&dqm->lock);
-	if (dqm->active_runlist == false)
+	if (!dqm->active_runlist)
 		goto out;
 
 	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b6e28dcaea1d7..a6a4b2b1c0d90 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -177,9 +177,9 @@ static bool allocate_event_notification_slot(struct file *devkfd,
 	bool ret;
 
 	ret = allocate_free_slot(p, page, signal_slot_index);
-	if (ret == false) {
+	if (!ret) {
 		ret = allocate_signal_page(devkfd, p);
-		if (ret == true)
+		if (ret)
 			ret = allocate_free_slot(p, page, signal_slot_index);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8fa894100290a..9beae87aadd54 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -300,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 		break;
 	}
 
-	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+	if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
 		pr_err("amdkfd: failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 90f391434fa39..ca8c09326b310 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -98,7 +98,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 	int retval;
 
 	BUG_ON(!pm);
-	BUG_ON(pm->allocated == true);
+	BUG_ON(pm->allocated);
 	BUG_ON(is_over_subscription == NULL);
 
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
@@ -292,7 +292,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 			q->properties.doorbell_off;
 
 	packet->mes_map_queues_ordinals[0].bitfields3.is_static =
-			(use_static == true) ? 1 : 0;
+			(use_static) ? 1 : 0;
 
 	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
 			lower_32_bits(q->gart_mqd_addr);
@@ -357,7 +357,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 				alloc_size_bytes);
 
 		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
-			if (kq->queue->properties.is_active != true)
+			if (!kq->queue->properties.is_active)
 				continue;
 
 			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
@@ -383,7 +383,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		}
 
 		list_for_each_entry(q, &qpd->queues_list, list) {
-			if (q->properties.is_active != true)
+			if (!q->properties.is_active)
 				continue;
 
 			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
@@ -531,7 +531,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
-	if (pm->allocated == true)
+	if (pm->allocated)
 		pm_release_ib(pm);
 	return retval;
 }
@@ -647,7 +647,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	default:
 		BUG();
 		break;
-	};
+	}
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);