drm/i915: Store a pointer to intel_context in i915_request
To ease the frequent and ugly pointer dance of
&request->gem_context->engine[request->engine->id] during request
submission, store that pointer as request->hw_context. One major
advantage that we will exploit later is that this decouples the logical
context state from the engine itself.

v2: Set mock_context->ops so we don't crash and burn in selftests.
    Cleanups from Tvrtko.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-3-chris@chris-wilson.co.uk
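The "pointer dance" the message describes can be pictured with a minimal sketch. This is not code from the commit: the struct layouts below are heavily simplified placeholders for the real i915 definitions (which carry many more fields and size the array with I915_NUM_ENGINES), and the helper names are invented purely for illustration.

/*
 * Illustrative sketch only -- simplified placeholder layouts,
 * not the driver's real definitions.
 */
struct intel_context {
	void *lrc_reg_state;		/* logical ring context state */
};

struct intel_engine_cs {
	unsigned int id;
};

struct i915_gem_context {
	struct intel_context __engine[8];	/* 8 is a placeholder bound */
};

struct i915_request {
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;	/* cached by this patch */
};

/* Before: each submission-path caller re-derived the per-engine context. */
static inline struct intel_context *
request_hw_context_before(struct i915_request *rq)
{
	return &rq->gem_context->__engine[rq->engine->id];
}

/* After: the request already carries the resolved pointer. */
static inline struct intel_context *
request_hw_context_after(struct i915_request *rq)
{
	return rq->hw_context;
}

Because the resolved intel_context travels with the request, submission code no longer needs to index into the GEM context's per-engine array, which is the decoupling the message refers to.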
Chris Wilson committed May 18, 2018
1 parent 01278cb commit 1fc44d9
Showing 20 changed files with 321 additions and 296 deletions.
6 changes: 3 additions & 3 deletions drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

#define CTX_CONTEXT_CONTROL_VAL 0x03

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
bool is_inhibit_context(struct intel_context *ce)
{
u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

@@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
!is_inhibit_context(s->shadow_ctx, ring_id))
!is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;

if (mmio->mask)
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,

void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
bool is_inhibit_context(struct intel_context *ce);

int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
141 changes: 52 additions & 89 deletions drivers/gpu/drm/i915/gvt/scheduler.c
@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(

static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->__engine[ring_id].state->obj;
workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;

@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->__engine[ring_id].state->obj;
workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
static void shadow_context_descriptor_update(struct intel_context *ce)
{
struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;

desc = ce->lrc_desc;
@@ -292,20 +286,19 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
desc |= ctx->desc_template & ((1ULL << 12) - 1);
desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

ce->lrc_desc = desc;
}

static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
struct i915_request *req = workload->req;

if (IS_KABYLAKE(req->i915) &&
is_inhibit_context(req->gem_context, req->engine->id))
if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);

/* allocate shadow ring buffer */
@@ -353,68 +346,63 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct intel_ring *ring;
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct intel_context *ce;
int ret;

lockdep_assert_held(&dev_priv->drm.struct_mutex);

if (workload->shadowed)
if (workload->req)
return 0;

/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
* moment, i915 may already unpined the shadow context to make the
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
ce = intel_context_pin(shadow_ctx, engine);
if (IS_ERR(ce)) {
gvt_vgpu_err("fail to pin shadow context\n");
return PTR_ERR(ce);
}

shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;

if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(ce);

ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto err_scan;
goto err_unpin;

if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_scan;
}

/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
* moment, i915 may already unpined the shadow context to make the
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
ring = intel_context_pin(shadow_ctx, engine);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
goto err_shadow;
goto err_shadow;
}

ret = populate_shadow_context(workload);
if (ret)
goto err_unpin;
workload->shadowed = true;
goto err_shadow;

return 0;

err_unpin:
intel_context_unpin(shadow_ctx, engine);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
err_unpin:
intel_context_unpin(ce);
return ret;
}

static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct i915_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
return 0;

err_unpin:
intel_context_unpin(shadow_ctx, engine);
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
}
@@ -517,31 +504,20 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
return ret;
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct intel_vgpu_workload *workload = container_of(wa_ctx,
struct intel_vgpu_workload,
wa_ctx);
int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &workload->vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->__engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;

page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
struct intel_vgpu_workload *workload =
container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
struct i915_request *rq = workload->req;
struct execlist_ring_context *shadow_ring_context =
(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

kunmap_atomic(shadow_ring_context);
return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -670,12 +646,9 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
int ret;

gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
@@ -687,10 +660,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;

ret = prepare_workload(workload);
if (ret) {
intel_context_unpin(shadow_ctx, engine);
goto out;
}

out:
if (ret)
@@ -765,27 +734,23 @@ static struct intel_vgpu_workload *pick_next_workload(

static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->__engine[ring_id].state->obj;
struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;

gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);

context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);

context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;

if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;

i = 2;
@@ -858,6 +823,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int event;

mutex_lock(&gvt->lock);
@@ -866,11 +832,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
if (workload->req) {
struct drm_i915_private *dev_priv =
workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine =
dev_priv->engine[workload->ring_id];
rq = fetch_and_zero(&workload->req);
if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));

@@ -886,8 +849,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}

i915_request_put(fetch_and_zero(&workload->req));

if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +857,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
mutex_lock(&dev_priv->drm.struct_mutex);

/* unpin shadow ctx as the shadow_ctx update is done */
intel_context_unpin(s->shadow_ctx, engine);
mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_lock(&rq->i915->drm.struct_mutex);
intel_context_unpin(rq->hw_context);
mutex_unlock(&rq->i915->drm.struct_mutex);

i915_request_put(rq);
}

gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1270,7 +1234,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);

workload->status = -EINPROGRESS;
workload->shadowed = false;
workload->vgpu = vgpu;

return workload;
1 change: 0 additions & 1 deletion drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadowed;
int status;

struct intel_vgpu_mm *shadow_mm;
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/i915_drv.h
@@ -1950,6 +1950,7 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;

struct intel_context *pinned_ctx;
u32 specific_ctx_id;

struct hrtimer poll_check_timer;
12 changes: 6 additions & 6 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -3181,14 +3181,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);

for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
struct intel_context *ce;

i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
intel_context_unpin(ctx, engine);
ce = fetch_and_zero(&engine->last_retired_context);
if (ce)
intel_context_unpin(ce);

/*
* Ostensibily, we always want a context loaded for powersaving,
@@ -4897,13 +4897,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)

static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
struct i915_gem_context *kernel_context = i915->kernel_context;
struct i915_gem_context *kctx = i915->kernel_context;
struct intel_engine_cs *engine;
enum intel_engine_id id;

for_each_engine(engine, i915, id) {
GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
GEM_BUG_ON(engine->last_retired_context != kernel_context);
GEM_BUG_ON(engine->last_retired_context->gem_context != kctx);
}
}

[Diffs for the remaining 14 changed files not loaded]
