Skip to content

Commit

Permalink
drm/i915/gvt: replace the gvt_err with gvt_vgpu_err
Browse files Browse the repository at this point in the history
gvt_err should be used only for the very few critical error messages
during host i915 driver initialization. This patch
1. removes the redundant gvt_err;
2. creates a new gvt_vgpu_err to show errors caused by vgpu;
3. replaces most of the gvt_err calls with gvt_vgpu_err;
4. leaves very few gvt_err calls for dumping gvt errors during host gvt
   initialization.

v2. change name to gvt_vgpu_err and add vgpu id to the message. (Kevin)
    add gpu id to gvt_vgpu_err. (Zhi)
v3. remove gpu id from gvt_vgpu_err caller. (Zhi)
v4. add vgpu check to the gvt_vgpu_err macro. (Zhiyuan)
v5. add comments for v3 and v4.
v6. split the big patch into two, with this patch only for checking
    gvt_vgpu_err. (Zhenyu)
v7. rebase to staging branch
v8. rebase to fix branch

Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
  • Loading branch information
Tina Zhang authored and Zhenyu Wang committed Mar 17, 2017
1 parent 4938ca9 commit 695fbc0
Show file tree
Hide file tree
Showing 12 changed files with 180 additions and 158 deletions.
8 changes: 4 additions & 4 deletions drivers/gpu/drm/i915/gvt/aperture_gm.c
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
const char *item;

if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
gvt_err("Invalid vGPU creation params\n");
gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}

Expand Down Expand Up @@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;

no_enough_resource:
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
gvt_vgpu_err("fail to allocate resource %s\n", item);
gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
Expand Down
84 changes: 48 additions & 36 deletions drivers/gpu/drm/i915/gvt/cmd_parser.c
Original file line number Diff line number Diff line change
Expand Up @@ -843,20 +843,19 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_gvt *gvt = vgpu->gvt;

if (offset + 4 > gvt->device_info.mmio_size) {
gvt_err("%s access to (%x) outside of MMIO range\n",
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
}

if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_err("vgpu%d: %s access to non-render register (%x)\n",
s->vgpu->id, cmd, offset);
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return 0;
}

if (is_shadowed_mmio(offset)) {
gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
s->vgpu->id, offset);
gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
return 0;
}

Expand Down Expand Up @@ -1152,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
u32 dword2 = cmd_val(s, 2);
Expand Down Expand Up @@ -1190,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
break;

default:
gvt_err("unknown plane code %d\n", plane);
gvt_vgpu_err("unknown plane code %d\n", plane);
return -EINVAL;
}

Expand Down Expand Up @@ -1297,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
struct mi_display_flip_command_info info;
struct intel_vgpu *vgpu = s->vgpu;
int ret;
int i;
int len = cmd_length(s);

ret = decode_mi_display_flip(s, &info);
if (ret) {
gvt_err("fail to decode MI display flip command\n");
gvt_vgpu_err("fail to decode MI display flip command\n");
return ret;
}

ret = check_mi_display_flip(s, &info);
if (ret) {
gvt_err("invalid MI display flip command\n");
gvt_vgpu_err("invalid MI display flip command\n");
return ret;
}

ret = update_plane_mmio_from_mi_display_flip(s, &info);
if (ret) {
gvt_err("fail to update plane mmio\n");
gvt_vgpu_err("fail to update plane mmio\n");
return ret;
}

Expand Down Expand Up @@ -1373,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
int ret;

if (op_size > max_surface_size) {
gvt_err("command address audit fail name %s\n", s->info->name);
gvt_vgpu_err("command address audit fail name %s\n",
s->info->name);
return -EINVAL;
}

Expand All @@ -1390,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
return 0;
err:
gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);

pr_err("cmd dump: ");
Expand Down Expand Up @@ -1435,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)

static inline int unexpected_cmd(struct parser_exec_state *s)
{
gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
s->vgpu->id, s->info->name);
struct intel_vgpu *vgpu = s->vgpu;

gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

return -EINVAL;
}

Expand Down Expand Up @@ -1539,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid gma address: %lx\n", gma);
gvt_vgpu_err("invalid gma address: %lx\n", gma);
return -EFAULT;
}

Expand Down Expand Up @@ -1580,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
uint32_t bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;

/* get the start gm address of the batch buffer */
Expand All @@ -1588,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)

info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
Expand All @@ -1597,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
Expand All @@ -1622,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
uint32_t bb_size;
void *dst = NULL;
Expand Down Expand Up @@ -1656,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)

ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
gvt_err("failed to set shadow batch to CPU\n");
gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src;
}

Expand All @@ -1668,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
gma, gma + bb_size,
dst);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
}

Expand Down Expand Up @@ -1699,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
bool second_level;
int ret = 0;
struct intel_vgpu *vgpu = s->vgpu;

if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
}

second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
}

Expand All @@ -1725,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s);
if (ret < 0)
gvt_err("invalid shadow batch buffer\n");
gvt_vgpu_err("invalid shadow batch buffer\n");
} else {
/* emulate a batch buffer end to do return right */
ret = cmd_handler_mi_batch_buffer_end(s);
Expand Down Expand Up @@ -2452,14 +2459,15 @@ static int cmd_parser_exec(struct parser_exec_state *s)
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
struct intel_vgpu *vgpu = s->vgpu;

t0 = get_cycles();

cmd = cmd_val(s, 0);

info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
Expand All @@ -2475,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
gvt_err("%s handler error\n", info->name);
gvt_vgpu_err("%s handler error\n", info->name);
return ret;
}
}
Expand All @@ -2486,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
if (ret) {
gvt_err("%s IP advance error\n", info->name);
gvt_vgpu_err("%s IP advance error\n", info->name);
return ret;
}
}
Expand All @@ -2509,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,

unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0;
struct intel_vgpu *vgpu = s->vgpu;

gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail;
Expand All @@ -2520,15 +2529,15 @@ static int command_scan(struct parser_exec_state *s,
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_err("ip_gma %lx out of ring scope."
gvt_vgpu_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start,
gma_bottom);
parser_exec_state_dump(s);
return -EINVAL;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_err("ip_gma %lx out of range."
gvt_vgpu_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start,
rb_head, rb_tail);
Expand All @@ -2538,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
}
ret = cmd_parser_exec(s);
if (ret) {
gvt_err("cmd parser error\n");
gvt_vgpu_err("cmd parser error\n");
parser_exec_state_dump(s);
break;
}
Expand Down Expand Up @@ -2662,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
Expand All @@ -2674,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
Expand All @@ -2685,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ret;
struct intel_vgpu *vgpu = workload->vgpu;

ret = shadow_workload_ring_buffer(workload);
if (ret) {
gvt_err("fail to shadow workload ring_buffer\n");
gvt_vgpu_err("fail to shadow workload ring_buffer\n");
return ret;
}

ret = scan_workload(workload);
if (ret) {
gvt_err("scan workload error\n");
gvt_vgpu_err("scan workload error\n");
return ret;
}
return 0;
Expand All @@ -2704,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
Expand All @@ -2717,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
/* get the va of the shadow batch buffer */
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(map)) {
gvt_err("failed to vmap shadow indirect ctx\n");
gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
ret = PTR_ERR(map);
goto put_obj;
}

ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret) {
gvt_err("failed to set shadow indirect ctx to CPU\n");
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
}

Expand All @@ -2733,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
gvt_err("fail to copy guest indirect ctx\n");
gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}

Expand Down Expand Up @@ -2767,21 +2778,22 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;

if (wa_ctx->indirect_ctx.size == 0)
return 0;

ret = shadow_indirect_ctx(wa_ctx);
if (ret) {
gvt_err("fail to shadow indirect ctx\n");
gvt_vgpu_err("fail to shadow indirect ctx\n");
return ret;
}

combine_wa_ctx(wa_ctx);

ret = scan_wa_ctx(wa_ctx);
if (ret) {
gvt_err("scan wa ctx error\n");
gvt_vgpu_err("scan wa ctx error\n");
return ret;
}

Expand Down
8 changes: 8 additions & 0 deletions drivers/gpu/drm/i915/gvt/debug.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)

/*
 * gvt_vgpu_err() - log an error caused by a vGPU, tagged with its id.
 *
 * Relies on a variable named `vgpu` (struct intel_vgpu *) being in scope
 * at every call site; the callers in this patch add such a local where one
 * was not already present.  When `vgpu` is NULL or an ERR_PTR the id is
 * omitted from the message.
 *
 * NOTE(review): despite the "err" name this expands to DRM_DEBUG_DRIVER,
 * so these messages are suppressed unless DRM driver debugging is enabled
 * -- confirm this is intended rather than DRM_ERROR.
 */
#define gvt_vgpu_err(fmt, args...) \
do { \
if (IS_ERR_OR_NULL(vgpu)) \
DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
else \
DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)

#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)

Expand Down
Loading

0 comments on commit 695fbc0

Please sign in to comment.