drm/scheduler: provide scheduler score externally
Allow multiple schedulers to share the load balancing score.

This is useful when one engine is backed by multiple hw rings.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-and-Tested-by: Leo Liu <leo.liu@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210204144405.2737-1-christian.koenig@amd.com
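
For illustration, here is a minimal, hypothetical sketch of how a driver could use the new parameter: several rings of the same engine register their schedulers with one shared atomic_t, so drm_sched_pick_best() compares their combined load. The device structure, ops table, limits and names below are placeholders and not part of this patch; existing drivers simply pass NULL and keep a per-scheduler score.

/* Hypothetical driver snippet (not from this patch): all hw rings of one
 * engine share a single load-balancing score via the new parameter.
 */
static atomic_t my_engine_score = ATOMIC_INIT(0);

static int my_engine_sched_init(struct my_device *mdev)
{
	unsigned int i;
	int r;

	for (i = 0; i < MY_NUM_RINGS; i++) {
		r = drm_sched_init(&mdev->ring[i].sched, &my_sched_ops,
				   MY_HW_SUBMISSION_LIMIT, MY_JOB_HANG_LIMIT,
				   msecs_to_jiffies(MY_TIMEOUT_MS),
				   &my_engine_score, /* shared score */
				   mdev->ring[i].name);
		if (r)
			return r;
	}
	return 0;
}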
Christian König committed Feb 5, 2021
1 parent f4a84e1 commit f2f12eb
Showing 8 changed files with 22 additions and 21 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
 	r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
 			   num_hw_submission, amdgpu_job_hang_limit,
-			   timeout, ring->name);
+			   timeout, NULL, ring->name);
 	if (r) {
 		DRM_ERROR("Failed to create scheduler on ring %s.\n",
 			  ring->name);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -190,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 
 	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
 			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
-			     msecs_to_jiffies(500), dev_name(gpu->dev));
+			     msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
 	if (ret)
 		return ret;
 
2 changes: 1 addition & 1 deletion drivers/gpu/drm/lima/lima_sched.c
@@ -509,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 
 	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
 			      lima_job_hang_limit, msecs_to_jiffies(timeout),
-			      name);
+			      NULL, name);
 }
 
 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
2 changes: 1 addition & 1 deletion drivers/gpu/drm/panfrost/panfrost_job.c
@@ -627,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		ret = drm_sched_init(&js->queue[j].sched,
 				     &panfrost_sched_ops,
 				     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
-				     "pan_js");
+				     NULL, "pan_js");
 		if (ret) {
 			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
 			goto err_sched;
2 changes: 1 addition & 1 deletion drivers/gpu/drm/scheduler/sched_entity.c
@@ -489,7 +489,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(&entity->rq->sched->score);
+	atomic_inc(entity->rq->sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
18 changes: 9 additions & 9 deletions drivers/gpu/drm/scheduler/sched_main.c
@@ -91,7 +91,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	if (!list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_inc(&rq->sched->score);
+	atomic_inc(rq->sched->score);
 	list_add_tail(&entity->list, &rq->entities);
 	spin_unlock(&rq->lock);
 }
@@ -110,7 +110,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	if (list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_dec(&rq->sched->score);
+	atomic_dec(rq->sched->score);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
@@ -173,7 +173,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
+	atomic_dec(sched->score);
 
 	trace_drm_sched_process_job(s_fence);
 
@@ -732,7 +732,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 			continue;
 		}
 
-		num_score = atomic_read(&sched->score);
+		num_score = atomic_read(sched->score);
 		if (num_score < min_score) {
 			min_score = num_score;
 			picked_sched = sched;
@@ -842,23 +842,23 @@ static int drm_sched_main(void *param)
  * @hw_submission: number of hw submissions that can be in flight
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
+ * @score: optional score atomic shared with other schedulers
  * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   unsigned hw_submission,
-		   unsigned hang_limit,
-		   long timeout,
-		   const char *name)
+		   unsigned hw_submission, unsigned hang_limit, long timeout,
+		   atomic_t *score, const char *name)
 {
 	int i, ret;
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
 	sched->timeout = timeout;
 	sched->hang_limit = hang_limit;
+	sched->score = score ? score : &sched->_score;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
@@ -868,7 +868,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-	atomic_set(&sched->score, 0);
+	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
10 changes: 5 additions & 5 deletions drivers/gpu/drm/v3d/v3d_sched.c
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_bin_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_bin");
+			     NULL, "v3d_bin");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
 		return ret;
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_render_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_render");
+			     NULL, "v3d_render");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
 			ret);
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_tfu_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_tfu");
+			     NULL, "v3d_tfu");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
 			ret);
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_csd_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_csd");
+			     NULL, "v3d_csd");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
 			ret);
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_cache_clean_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_cache_clean");
+			     NULL, "v3d_cache_clean");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
 			ret);
5 changes: 3 additions & 2 deletions include/drm/gpu_scheduler.h
@@ -297,15 +297,16 @@ struct drm_gpu_scheduler {
 	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
-	atomic_t			score;
+	atomic_t			*score;
+	atomic_t			_score;
 	bool				ready;
 	bool				free_guilty;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
-		   const char *name);
+		   atomic_t *score, const char *name);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
