drm/sched: Convert drm scheduler to use a work queue rather than kthread
In Xe, the new Intel GPU driver, a choice has been made to have a 1 to 1
mapping between a drm_gpu_scheduler and drm_sched_entity. At first this
seems a bit odd, but the reasoning is explained below.

1. In Xe the submission order from multiple drm_sched_entity is not
guaranteed to match the completion order, even when targeting the same
hardware engine. This is because Xe has a firmware scheduler, the GuC,
which is allowed to reorder, timeslice, and preempt submissions. If a
drm_gpu_scheduler were shared across multiple drm_sched_entity, the TDR
would fall apart, as the TDR expects submission order == completion
order. Using a dedicated drm_gpu_scheduler per drm_sched_entity solves
this problem.

2. In Xe, submissions are done by programming a ring buffer (circular
buffer), and a drm_gpu_scheduler provides a limit on the number of
in-flight jobs. If that limit is set to RING_SIZE / MAX_SIZE_PER_JOB, we
get flow control on the ring for free (both points are illustrated in
the sketch after this list).
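
To make points 1 and 2 concrete, here is a minimal sketch of the 1:1
model against the drm_sched_init() signature introduced by this patch,
assuming the existing drm_sched_entity_init() helper. It is illustrative
only: RING_SIZE, MAX_SIZE_PER_JOB, my_sched_ops, init_exec_queue, and
xe_submit_wq are hypothetical placeholders, not actual Xe code.

#include <linux/sched.h>
#include <linux/sizes.h>
#include <drm/gpu_scheduler.h>

#define RING_SIZE		SZ_16K	/* hypothetical ring size in bytes */
#define MAX_SIZE_PER_JOB	SZ_1K	/* hypothetical worst-case job footprint */

/* Hypothetical ops; a real driver supplies run_job/free_job/timedout_job. */
static const struct drm_sched_backend_ops my_sched_ops;

static int init_exec_queue(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity,
			   struct workqueue_struct *xe_submit_wq,
			   struct device *dev)
{
	int ret;

	/*
	 * Point 2: capping in-flight jobs at RING_SIZE / MAX_SIZE_PER_JOB
	 * means the scheduler can never overrun the ring, so ring flow
	 * control comes for free.
	 */
	ret = drm_sched_init(sched, &my_sched_ops, xe_submit_wq,
			     DRM_SCHED_PRIORITY_COUNT,
			     RING_SIZE / MAX_SIZE_PER_JOB, 0,
			     MAX_SCHEDULE_TIMEOUT, NULL, NULL,
			     "exec_queue", dev);
	if (ret)
		return ret;

	/*
	 * Point 1: exactly one entity feeds this scheduler, so from the
	 * TDR's perspective submission order == completion order.
	 */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}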

A problem with this design is that a drm_gpu_scheduler currently uses a
kthread for submission / job cleanup. This doesn't scale when a large
number of drm_gpu_scheduler are used. To work around the scaling issue,
use a work item on a workqueue rather than a kthread for submission /
job cleanup.
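
For context, the workaround boils down to the standard kernel workqueue
pattern sketched below: each scheduler owns a work item rather than a
kthread, and any number of work items can share one workqueue. This is a
generic illustration of the mechanism, not the patch itself; all toy_*
names are made up.

#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Toy stand-in for drm_gpu_scheduler. */
struct toy_sched {
	struct workqueue_struct *submit_wq;	/* possibly shared */
	struct work_struct work_run_job;
	bool pause_submit;
};

static void toy_run_job_work(struct work_struct *w)
{
	struct toy_sched *s = container_of(w, struct toy_sched, work_run_job);

	if (READ_ONCE(s->pause_submit))
		return;
	/* Run one job here, then re-queue the work item if more remain. */
}

static int toy_sched_init(struct toy_sched *s, struct workqueue_struct *wq)
{
	/*
	 * No kthread_run(): N schedulers contribute N work items to a
	 * shared worker pool instead of spawning N kernel threads.
	 */
	s->submit_wq = wq ? wq : alloc_ordered_workqueue("toy_sched", 0);
	if (!s->submit_wq)
		return -ENOMEM;
	s->pause_submit = false;
	INIT_WORK(&s->work_run_job, toy_run_job_work);
	return 0;
}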

v2:
  - (Rob Clark) Fix msm build
  - Pass in run work queue
v3:
  - (Boris) don't have loop in worker
v4:
  - (Tvrtko) break out submit ready, stop, start helpers into own patch
v5:
  - (Boris) default to ordered work queue
v6:
  - (Luben / checkpatch) fix alignment in msm_ringbuffer.c
  - (Luben) s/drm_sched_submit_queue/drm_sched_wqueue_enqueue
  - (Luben) Update comment for drm_sched_wqueue_enqueue
  - (Luben) Positive check for submit_wq in drm_sched_init
  - (Luben) s/alloc_submit_wq/own_submit_wq
v7:
  - (Luben) s/drm_sched_wqueue_enqueue/drm_sched_run_job_queue
v8:
  - (Luben) Adjust var names / comments

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-3-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
Matthew Brost authored and Luben Tuikov committed Nov 1, 2023
1 parent 35963cf commit a6149f0
Showing 9 changed files with 86 additions and 81 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2279,7 +2279,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 			break;
 		}
 
-		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
 				   DRM_SCHED_PRIORITY_COUNT,
 				   ring->num_hw_submission, 0,
 				   timeout, adev->reset_domain->wq,
2 changes: 1 addition & 1 deletion drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 {
 	int ret;
 
-	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
 			     msecs_to_jiffies(500), NULL, NULL,
2 changes: 1 addition & 1 deletion drivers/gpu/drm/lima/lima_sched.c
@@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 
 	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
 
-	return drm_sched_init(&pipe->base, &lima_sched_ops,
+	return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
 			      DRM_SCHED_PRIORITY_COUNT,
 			      1,
 			      lima_job_hang_limit,
2 changes: 1 addition & 1 deletion drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -94,7 +94,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	/* currently managing hangcheck ourselves: */
 	sched_timeout = MAX_SCHEDULE_TIMEOUT;
 
-	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+	ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     num_hw_submissions, 0, sched_timeout,
 			     NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -429,7 +429,7 @@ int nouveau_sched_init(struct nouveau_drm *drm)
 	if (!drm->sched_wq)
 		return -ENOMEM;
 
-	return drm_sched_init(sched, &nouveau_sched_ops,
+	return drm_sched_init(sched, &nouveau_sched_ops, NULL,
 			      DRM_SCHED_PRIORITY_COUNT,
 			      NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
 			      NULL, NULL, "nouveau_sched", drm->dev->dev);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/panfrost/panfrost_job.c
@@ -852,7 +852,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
 
 		ret = drm_sched_init(&js->queue[j].sched,
-				     &panfrost_sched_ops,
+				     &panfrost_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     nentries, 0,
 				     msecs_to_jiffies(JOB_TIMEOUT_MS),
131 changes: 66 additions & 65 deletions drivers/gpu/drm/scheduler/sched_main.c
@@ -48,7 +48,6 @@
  * through the jobs entity pointer.
  */
 
-#include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
@@ -256,6 +255,16 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
 }
 
+/**
+ * drm_sched_run_job_queue - enqueue run-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
+{
+	if (!READ_ONCE(sched->pause_submit))
+		queue_work(sched->submit_wq, &sched->work_run_job);
+}
+
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
@@ -275,7 +284,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
+	drm_sched_run_job_queue(sched);
 }
 
 /**
@@ -874,7 +883,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
 {
 	if (drm_sched_can_queue(sched))
-		wake_up_interruptible(&sched->wake_up_worker);
+		drm_sched_run_job_queue(sched);
 }
 
 /**
@@ -985,60 +994,41 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 EXPORT_SYMBOL(drm_sched_pick_best);
 
 /**
- * drm_sched_blocked - check if the scheduler is blocked
+ * drm_sched_run_job_work - main scheduler thread
  *
- * @sched: scheduler instance
- *
- * Returns true if blocked, otherwise false.
+ * @w: run job work
  */
-static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+static void drm_sched_run_job_work(struct work_struct *w)
 {
-	if (kthread_should_park()) {
-		kthread_parkme();
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * drm_sched_main - main scheduler thread
- *
- * @param: scheduler instance
- *
- * Returns 0.
- */
-static int drm_sched_main(void *param)
-{
-	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+	struct drm_gpu_scheduler *sched =
+		container_of(w, struct drm_gpu_scheduler, work_run_job);
+	struct drm_sched_entity *entity;
+	struct drm_sched_job *cleanup_job;
 	int r;
 
-	sched_set_fifo_low(current);
+	if (READ_ONCE(sched->pause_submit))
+		return;
 
-	while (!kthread_should_stop()) {
-		struct drm_sched_entity *entity = NULL;
-		struct drm_sched_fence *s_fence;
-		struct drm_sched_job *sched_job;
-		struct dma_fence *fence;
-		struct drm_sched_job *cleanup_job = NULL;
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	entity = drm_sched_select_entity(sched);
 
-		wait_event_interruptible(sched->wake_up_worker,
-					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
-					 (!drm_sched_blocked(sched) &&
-					  (entity = drm_sched_select_entity(sched))) ||
-					 kthread_should_stop());
+	if (!entity && !cleanup_job)
+		return;	/* No more work */
 
-		if (cleanup_job)
-			sched->ops->free_job(cleanup_job);
+	if (cleanup_job)
+		sched->ops->free_job(cleanup_job);
 
-		if (!entity)
-			continue;
+	if (entity) {
+		struct dma_fence *fence;
+		struct drm_sched_fence *s_fence;
+		struct drm_sched_job *sched_job;
 
 		sched_job = drm_sched_entity_pop_job(entity);
 
 		if (!sched_job) {
 			complete_all(&entity->entity_idle);
-			continue;
+			if (!cleanup_job)
+				return;	/* No more work */
+			goto again;
 		}
 
 		s_fence = sched_job->s_fence;
@@ -1069,14 +1059,18 @@ static int drm_sched_main(void *param)
 
 		wake_up(&sched->job_scheduled);
 	}
-	return 0;
+
+again:
+	drm_sched_run_job_queue(sched);
 }
 
 /**
  * drm_sched_init - Init a gpu scheduler instance
  *
  * @sched: scheduler instance
  * @ops: backend operations for this scheduler
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ *	       allocated and used
 * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
@@ -1091,6 +1085,7 @@ static int drm_sched_main(void *param)
 */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
+		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
@@ -1121,46 +1116,49 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		return 0;
 	}
 
+	if (submit_wq) {
+		sched->submit_wq = submit_wq;
+		sched->own_submit_wq = false;
+	} else {
+		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+		if (!sched->submit_wq)
+			return -ENOMEM;
+
+		sched->own_submit_wq = true;
+	}
+	ret = -ENOMEM;
 	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
 					GFP_KERNEL | __GFP_ZERO);
-	if (!sched->sched_rq) {
-		drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
-		return -ENOMEM;
-	}
+	if (!sched->sched_rq)
+		goto Out_free;
 	sched->num_rqs = num_rqs;
-	ret = -ENOMEM;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
 			goto Out_unroll;
 		drm_sched_rq_init(sched, sched->sched_rq[i]);
 	}
 
-	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	INIT_LIST_HEAD(&sched->pending_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
 	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
-
-	/* Each scheduler will run on a seperate kernel thread */
-	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
-	if (IS_ERR(sched->thread)) {
-		ret = PTR_ERR(sched->thread);
-		sched->thread = NULL;
-		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
-		goto Out_unroll;
-	}
+	sched->pause_submit = false;
 
 	sched->ready = true;
 	return 0;
 Out_unroll:
 	for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
 		kfree(sched->sched_rq[i]);
+Out_free:
 	kfree(sched->sched_rq);
 	sched->sched_rq = NULL;
+	if (sched->own_submit_wq)
+		destroy_workqueue(sched->submit_wq);
 	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
 	return ret;
 }
@@ -1178,8 +1176,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *s_entity;
 	int i;
 
-	if (sched->thread)
-		kthread_stop(sched->thread);
+	drm_sched_wqueue_stop(sched);
 
 	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
@@ -1202,6 +1199,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&sched->work_tdr);
 
+	if (sched->own_submit_wq)
+		destroy_workqueue(sched->submit_wq);
 	sched->ready = false;
 	kfree(sched->sched_rq);
 	sched->sched_rq = NULL;
@@ -1262,7 +1261,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
 */
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
 {
-	return !!sched->thread;
+	return sched->ready;
}
 EXPORT_SYMBOL(drm_sched_wqueue_ready);
 
@@ -1273,7 +1272,8 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready);
 */
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
 {
-	kthread_park(sched->thread);
+	WRITE_ONCE(sched->pause_submit, true);
+	cancel_work_sync(&sched->work_run_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_stop);
 
@@ -1284,6 +1284,7 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop);
 */
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
 {
-	kthread_unpark(sched->thread);
+	WRITE_ONCE(sched->pause_submit, false);
+	queue_work(sched->submit_wq, &sched->work_run_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_start);
10 changes: 5 additions & 5 deletions drivers/gpu/drm/v3d/v3d_sched.c
@@ -388,7 +388,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 	int ret;
 
 	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
-			     &v3d_bin_sched_ops,
+			     &v3d_bin_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -397,7 +397,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 		return ret;
 
 	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
-			     &v3d_render_sched_ops,
+			     &v3d_render_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -406,7 +406,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 		goto fail;
 
 	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
-			     &v3d_tfu_sched_ops,
+			     &v3d_tfu_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -416,7 +416,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 
 	if (v3d_has_csd(v3d)) {
 		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
-				     &v3d_csd_sched_ops,
+				     &v3d_csd_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			goto fail;
 
 		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
-				     &v3d_cache_clean_sched_ops,
+				     &v3d_cache_clean_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms), NULL,