drm/i915/gvt: vGPU command scanner
This patch introduces a command scanner that scans and shadows guest command buffers (ring buffers, batch buffers and the RCS workaround context buffers) before a vGPU workload is dispatched to hardware.

Signed-off-by: Yulei Zhang <yulei.zhang@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Zhi Wang authored and Zhenyu Wang committed Oct 14, 2016
1 parent 1786571 commit be1da70
Showing 11 changed files with 3,212 additions and 4 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gvt/Makefile
@@ -1,7 +1,7 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-	execlist.o scheduler.o sched_policy.o render.o
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
2,878 changes: 2,878 additions & 0 deletions drivers/gpu/drm/i915/gvt/cmd_parser.c

Large diffs are not rendered by default.
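
Since the 2,878-line cmd_parser.c itself is not rendered here, the sketch below illustrates the general pattern the rest of the commit implies: cmd_parser.h defines GVT_CMD_HASH_BITS and struct intel_gvt (below) gains DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS), i.e. command descriptors hashed by opcode and looked up while a guest buffer is scanned. The struct and function names in this sketch are hypothetical, not the ones used by the actual file.

/* Illustrative sketch only; requires <linux/hashtable.h> for hash_add()
 * and hash_for_each_possible(). */
struct cmd_info {
	const char *name;
	u32 opcode;					/* hash key */
	int len;					/* length in dwords, or variable */
	int (*handler)(struct parser_exec_state *s);	/* audit / fix-up hook */
	struct hlist_node hlist;
};

static void register_cmd(struct intel_gvt *gvt, struct cmd_info *info)
{
	/* gvt->cmd_table is the DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS)
	 * member added to struct intel_gvt later in this commit. */
	hash_add(gvt->cmd_table, &info->hlist, info->opcode);
}

static struct cmd_info *find_cmd(struct intel_gvt *gvt, u32 opcode)
{
	struct cmd_info *info;

	hash_for_each_possible(gvt->cmd_table, info, hlist, opcode)
		if (info->opcode == opcode)
			return info;
	return NULL;	/* unknown command: the scan fails the workload */
}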

49 changes: 49 additions & 0 deletions drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_

#define GVT_CMD_HASH_BITS 7

void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);

int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);

int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);

#endif
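
As a quick orientation (an illustrative sketch, not code from this commit): intel_gvt_init_cmd_parser() and intel_gvt_clean_cmd_parser() are wired into device init/teardown in gvt.c below, while the two scan-and-shadow calls run once per workload from dispatch_workload() in scheduler.c below, roughly in this order; example_scan_one_workload() is a hypothetical name.

/* Hypothetical helper showing only the per-workload call order. */
static int example_scan_one_workload(struct intel_vgpu_workload *workload)
{
	int ret;

	/* scan the guest ring buffer and batch buffers, building shadow copies */
	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		return ret;

	/* scan the RCS indirect/per-context workaround buffers as well */
	return intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
}
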
3 changes: 3 additions & 0 deletions drivers/gpu/drm/i915/gvt/debug.h
@@ -51,4 +51,7 @@
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)

#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)

#endif
158 changes: 157 additions & 1 deletion drivers/gpu/drm/i915/gvt/execlist.c
@@ -363,6 +363,109 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))


#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
unsigned long add, int gmadr_bytes)
{
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return -1;

*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 8) {
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
add & BATCH_BUFFER_ADDR_HIGH_MASK;
}

return 0;
}
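
/*
 * Editor's illustration, not part of the patch: (1 << 2) and (2 << 2) above
 * are the byte offsets of dwords 1 and 2 of the guest's MI_BATCH_BUFFER_START
 * command.  Assuming the usual gen8+ layout (dword 0 = header, dword 1 =
 * graphics address bits 31:2, dword 2 = high address bits when the device
 * encodes 8-byte addresses in commands), redirecting the command to the
 * shadow copy boils down to the sketch below; redirect_bb_start() is a
 * hypothetical name.
 */
static void redirect_bb_start(u32 *bb_start_cmd, u64 shadow_gma, int gmadr_bytes)
{
	/* low 32 bits, kept dword aligned */
	bb_start_cmd[1] = lower_32_bits(shadow_gma) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 8)
		/* remaining high address bits */
		bb_start_cmd[2] = upper_32_bits(shadow_gma) &
			BATCH_BUFFER_ADDR_HIGH_MASK;
}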

static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct i915_vma *vma;
unsigned long gma;

/* pin the gem object to ggtt */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;

list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}
i915_gem_object_unpin_pages(entry_obj->obj);

/* update the relocated gma with the shadow batch buffer */
gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
WARN_ON(!IS_ALIGNED(gma, 4));
set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
}
}
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ring_id = wa_ctx->workload->ring_id;
struct i915_gem_context *shadow_ctx =
wa_ctx->workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;

page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);

shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

kunmap_atomic(shadow_ring_context);
return 0;
}

static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned long gma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;

if (wa_ctx->indirect_ctx.size == 0)
return;

vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin indirect ctx obj\n");
return;
}
i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);

gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
wa_ctx->indirect_ctx.shadow_gma = gma;

wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);

update_wa_ctx_2_shadow_ctx(wa_ctx);
}

static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
@@ -372,6 +475,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
intel_vgpu_pin_mm(workload->shadow_mm);
intel_vgpu_sync_oos_pages(workload->vgpu);
intel_vgpu_flush_post_shadow(workload->vgpu);
prepare_shadow_batch_buffer(workload);
prepare_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->emulate_schedule_in)
return 0;

@@ -381,6 +486,35 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
/* release all the shadow batch buffers */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;

list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
drm_gem_object_unreference(&(entry_obj->obj->base));
kvfree(entry_obj->va);
list_del(&entry_obj->list);
kfree(entry_obj);
}
}
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (wa_ctx->indirect_ctx.size == 0)
return;

drm_gem_object_unreference(&(wa_ctx->indirect_ctx.obj->base));
kvfree(wa_ctx->indirect_ctx.shadow_va);
}

static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
@@ -394,6 +528,9 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);

release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);

if (workload->status || vgpu->resetting)
goto out;

@@ -487,7 +624,7 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
u64 ring_context_gpa;
-	u32 head, tail, start, ctl, ctx_ctl;
+	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;

ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -532,6 +669,7 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);

init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
@@ -549,6 +687,24 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;

if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.workload = workload;

WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
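
/*
 * Editor's note (field layout assumed from the gen8+ context image, not
 * stated by the patch): RCS_INDIRECT_CTX packs a cacheline-aligned graphics
 * address together with the buffer size in cachelines in its low bits, and
 * bit 0 of BB_PER_CTX_PTR is a "valid" bit -- which is what the WARN_ON
 * above checks.  Worked example for a hypothetical guest value 0x00ab1006,
 * under that assumption:
 *
 *	guest_gma = 0x00ab1006 & INDIRECT_CTX_ADDR_MASK;	// 0x00ab1000
 *	size      = (0x00ab1006 & INDIRECT_CTX_SIZE_MASK)
 *			* CACHELINE_BYTES;			// 6 * 64 = 384 bytes
 */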

if (emulate_schedule_in)
memcpy(&workload->elsp_dwords,
&vgpu->execlist[ring_id].elsp_dwords,
11 changes: 10 additions & 1 deletion drivers/gpu/drm/i915/gvt/gvt.c
@@ -112,6 +112,8 @@ static void init_device_info(struct intel_gvt *gvt)
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
}

@@ -177,6 +179,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;

clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
@@ -249,14 +252,20 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_workload_scheduler;

-	ret = init_service_thread(gvt);
+	ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;

ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;

gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true;
return 0;

out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
4 changes: 4 additions & 0 deletions drivers/gpu/drm/i915/gvt/gvt.h
@@ -45,6 +45,7 @@
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

@@ -71,6 +72,8 @@ struct intel_gvt_device_info {
u32 gtt_start_offset;
u32 gtt_entry_size;
u32 gtt_entry_size_shift;
int gmadr_bytes_in_cmd;
u32 max_surface_size;
};

/* GM resources owned by a vGPU */
@@ -203,6 +206,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);

struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
4 changes: 4 additions & 0 deletions drivers/gpu/drm/i915/gvt/interrupt.h
@@ -226,4 +226,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);

int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);

#endif /* _GVT_INTERRUPT_H_ */
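
These three helpers let the command scanner map a ring id to the virtual interrupt event that should be raised when the corresponding command (pipe-control notify, MI_FLUSH_DW, MI_USER_INTERRUPT) completes. A minimal sketch of the assumed usage follows; the real logic lives in the unrendered cmd_parser.c, and record_user_interrupt() is a hypothetical name.

static void record_user_interrupt(struct intel_vgpu_workload *workload,
				  int ring_id)
{
	int event = gvt_ring_id_to_mi_user_interrupt_event(ring_id);

	/* remembered now, injected later by complete_current_workload()
	 * in scheduler.c below via intel_vgpu_trigger_virtual_event() */
	set_bit(event, workload->pending_events);
}
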
14 changes: 14 additions & 0 deletions drivers/gpu/drm/i915/gvt/scheduler.c
@@ -185,6 +185,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)

mutex_lock(&gvt->lock);

ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto err;

ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err;

ret = populate_shadow_context(workload);
if (ret)
goto err;
@@ -345,6 +353,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
int event;

mutex_lock(&gvt->lock);

@@ -355,6 +364,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
!atomic_read(&workload->shadow_ctx_active));

update_guest_context(workload);

for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(workload->vgpu,
event);
}

gvt_dbg_sched("ring id %d complete workload %p status %d\n",