drm/i915/gvt: handle workload lifecycle properly
Currently i915 has a request replay mechanism which can make sure a
request is replayed after a GPU reset. With this mechanism, GVT should
wait until the GVT request's seqno has passed before completing the
current workload, so that a context switch interrupt arrives before
GVT frees the workload. In this way, the workload lifecycle matches
the i915 request lifecycle: the workload can only be freed after its
request is completed.
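
To make that ordering concrete, below is a minimal user-space sketch
of the wait-and-recheck loop this patch introduces. It is illustrative
only: struct fake_request, request_completed() and wait_for_request()
are made-up stand-ins for i915's request/seqno machinery, not kernel
APIs. The point is simply that completion is gated on the seqno
actually passing, so a replayed request just causes another trip
around the loop.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for an i915 request: completed once the hardware seqno
 * passes the request's seqno.
 */
struct fake_request {
	unsigned int seqno;	/* seqno this request must reach */
	unsigned int hw_seqno;	/* last seqno the "GPU" retired */
};

static bool request_completed(const struct fake_request *req)
{
	return req->hw_seqno >= req->seqno;
}

/* A wait may return while a post-reset replay is still in flight,
 * before hw_seqno has caught up; here one seqno retires per call.
 */
static void wait_for_request(struct fake_request *req)
{
	req->hw_seqno++;
}

int main(void)
{
	struct fake_request req = { .seqno = 3, .hw_seqno = 0 };

	/* Complete (and free) the workload only once the request's
	 * seqno has really passed; otherwise wait again.
	 */
	while (!request_completed(&req))
		wait_for_request(&req);

	printf("seqno %u passed, workload can be freed\n", req.seqno);
	return 0;
}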

v2: use gvt_dbg_sched instead of gvt_err to print when waiting again

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Chuanxiao Dong authored and Zhenyu Wang committed Mar 6, 2017
1 parent 4ec3dd8 commit 8f1117a
Showing 1 changed file with 33 additions and 16 deletions.
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -151,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 		intel_gvt_restore_render_mmio(workload->vgpu,
 					      workload->ring_id);
+		/* A status of -EINPROGRESS means this workload did not
+		 * hit any issue during dispatch, so when SCHEDULE_OUT
+		 * arrives the status can be set to zero for good. If
+		 * the status is NOT -EINPROGRESS, something went wrong
+		 * during dispatch and the status must not be
+		 * overwritten with zero.
+		 */
+		if (workload->status == -EINPROGRESS)
+			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
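
The status handshake added above can be condensed into a small
stand-alone sketch; struct fake_workload, dispatch() and
schedule_out() here are hypothetical names, not the gvt structures:

#include <errno.h>
#include <stdio.h>

struct fake_workload {
	int status;
};

/* The dispatcher marks a clean workload as still in flight. */
static void dispatch(struct fake_workload *w, int err)
{
	w->status = err ? err : -EINPROGRESS;
}

/* Only a still-clean workload is zeroed on SCHEDULE_OUT; an error
 * recorded during dispatch is preserved.
 */
static void schedule_out(struct fake_workload *w)
{
	if (w->status == -EINPROGRESS)
		w->status = 0;
}

int main(void)
{
	struct fake_workload ok = { 0 }, bad = { 0 };

	dispatch(&ok, 0);
	schedule_out(&ok);

	dispatch(&bad, -EINVAL);
	schedule_out(&bad);

	printf("ok=%d bad=%d\n", ok.status, bad.status); /* ok=0 bad=-22 */
	return 0;
}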
@@ -362,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload = scheduler->current_workload[ring_id];
 	vgpu = workload->vgpu;
 
-	if (!workload->status && !vgpu->resetting) {
+	/* A workload with a request needs to wait for the context
+	 * switch to make sure the request is completed; a workload
+	 * without a request is completed directly.
+	 */
+	if (workload->req) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
-		update_guest_context(workload);
+		i915_gem_request_put(fetch_and_zero(&workload->req));
 
-		for_each_set_bit(event, workload->pending_events,
-				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(vgpu, event);
+		if (!workload->status && !vgpu->resetting) {
+			update_guest_context(workload);
+
+			for_each_set_bit(event, workload->pending_events,
+					 INTEL_GVT_EVENT_MAX)
+				intel_vgpu_trigger_virtual_event(vgpu, event);
+		}
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -400,7 +417,6 @@ static int workload_thread(void *priv)
 	int ring_id = p->ring_id;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	long lret;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -449,23 +465,24 @@ static int workload_thread(void *priv)
 
 		gvt_dbg_sched("ring id %d wait workload %p\n",
 				workload->ring_id, workload);
-
-		lret = i915_wait_request(workload->req,
+retry:
+		i915_wait_request(workload->req,
 					 0, MAX_SCHEDULE_TIMEOUT);
-		if (lret < 0) {
-			workload->status = lret;
-			gvt_err("fail to wait workload, skip\n");
-		} else {
-			workload->status = 0;
+		/* i915 has a replay mechanism and a request is replayed
+		 * after a GPU reset, so the seqno will be updated anyway.
+		 * If the seqno has not been updated after waiting, the
+		 * replay may still be in progress and we can wait again.
+		 */
+		if (!i915_gem_request_completed(workload->req)) {
+			gvt_dbg_sched("workload %p not completed, wait again\n",
+				workload);
+			goto retry;
 		}
 
 complete:
 		gvt_dbg_sched("will complete workload %p, status: %d\n",
 				workload, workload->status);
 
-		if (workload->req)
-			i915_gem_request_put(fetch_and_zero(&workload->req));
-
 		complete_current_workload(gvt, ring_id);
 
 		if (need_force_wake)
