From a58c38aa6cdf0d00727cafd85cd2354dec46401a Mon Sep 17 00:00:00 2001
From: Xiaolin Zhang
Date: Fri, 8 Sep 2017 21:37:48 +0800
Subject: [PATCH 01/50] drm/i915/gvt: Add support for opregion virtualization

The opregion was emulated with a copy from the host, which leads to
some display bugs such as guest resolution adjustment failure, because
the host opregion fails to claim port D support. A fake opregion table
is now provided instead, so that the opregion is fully emulated and
meets the guest port requirements.

v1 - initial patch
v2 - reformat opregion array with 0x02x output
v3 - opregion array removed, with opregion generation on host initialization
v4 - rebased v3 patch from stable branch to staging branch, which also has a different struct child_device_config, and addressed v3 review comments.

Signed-off-by: Xiaolin Zhang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/opregion.c | 180 ++++++++++++++++++++++++----
 drivers/gpu/drm/i915/gvt/reg.h | 3 +
 2 files changed, 159 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 311799136d7f6..db8abac5faad4 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -25,9 +25,133 @@
 #include "i915_drv.h"
 #include "gvt.h"
+/*
+ * Note: Only for GVT-g virtual VBT generation, other usage must
+ * not do like this.
+ */
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_VBT (1<<3)
+
+/* device handle */
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_EFP1 0x04
+#define DEVICE_TYPE_EFP2 0x40
+#define DEVICE_TYPE_EFP3 0x20
+#define DEVICE_TYPE_EFP4 0x10
+
+#define DEV_SIZE 38
+
+struct opregion_header {
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
+} __packed;
+
+struct bdb_data_header {
+	u8 id;
+	u16 size; /* data size */
+} __packed;
+
+struct vbt {
+	/* header->bdb_offset point to bdb_header offset */
+	struct vbt_header header;
+	struct bdb_header bdb_header;
+
+	struct bdb_data_header general_features_header;
+	struct bdb_general_features general_features;
+
+	struct bdb_data_header general_definitions_header;
+	struct bdb_general_definitions general_definitions;
+	struct child_device_config child0;
+	struct child_device_config child1;
+	struct child_device_config child2;
+	struct child_device_config child3;
+
+	struct bdb_data_header driver_features_header;
+	struct bdb_driver_features driver_features;
+};
+
+static void virt_vbt_generation(struct vbt *v)
+{
+	int num_child;
+
+	memset(v, 0, sizeof(struct vbt));
+
+	v->header.signature[0] = '$';
+	v->header.signature[1] = 'V';
+	v->header.signature[2] = 'B';
+	v->header.signature[3] = 'T';
+
+	/* there's features depending on version!
*/ + v->header.version = 155; + v->header.header_size = sizeof(v->header); + v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header); + v->header.bdb_offset = offsetof(struct vbt, bdb_header); + + strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); + v->bdb_header.version = 198; /* child_dev_size = 38 */ + v->bdb_header.header_size = sizeof(v->bdb_header); + + v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) + - sizeof(struct bdb_header); + + /* general features */ + v->general_features_header.id = BDB_GENERAL_FEATURES; + v->general_features_header.size = sizeof(struct bdb_general_features); + v->general_features.int_crt_support = 0; + v->general_features.int_tv_support = 0; + + /* child device */ + num_child = 4; /* each port has one child */ + v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; + /* size will include child devices */ + v->general_definitions_header.size = + sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; + v->general_definitions.child_dev_size = DEV_SIZE; + + /* portA */ + v->child0.handle = DEVICE_TYPE_EFP1; + v->child0.device_type = DEVICE_TYPE_DP; + v->child0.dvo_port = DVO_PORT_DPA; + v->child0.aux_channel = DP_AUX_A; + + /* portB */ + v->child1.handle = DEVICE_TYPE_EFP2; + v->child1.device_type = DEVICE_TYPE_DP; + v->child1.dvo_port = DVO_PORT_DPB; + v->child1.aux_channel = DP_AUX_B; + + /* portC */ + v->child2.handle = DEVICE_TYPE_EFP3; + v->child2.device_type = DEVICE_TYPE_DP; + v->child2.dvo_port = DVO_PORT_DPC; + v->child2.aux_channel = DP_AUX_C; + + /* portD */ + v->child3.handle = DEVICE_TYPE_EFP4; + v->child3.device_type = DEVICE_TYPE_DP; + v->child3.dvo_port = DVO_PORT_DPD; + v->child3.aux_channel = DP_AUX_D; + + /* driver features */ + v->driver_features_header.id = BDB_DRIVER_FEATURES; + v->driver_features_header.size = sizeof(struct bdb_driver_features); + v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS; +} + static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) { - u8 *buf; int i; if (WARN((vgpu_opregion(vgpu)->va), @@ -35,26 +159,11 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | - __GFP_ZERO, - get_order(INTEL_GVT_OPREGION_SIZE)); - - if (!vgpu_opregion(vgpu)->va) - return -ENOMEM; - - memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, - INTEL_GVT_OPREGION_SIZE); + vgpu_opregion(vgpu)->va = vgpu->gvt->opregion.opregion_va; for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - /* for unknown reason, the value in LID field is incorrect - * which block the windows guest, so workaround it by force - * setting it to "OPEN" - */ - buf = (u8 *)vgpu_opregion(vgpu)->va; - buf[INTEL_GVT_OPREGION_CLID] = 0x3; - return 0; } @@ -139,7 +248,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) */ void intel_gvt_clean_opregion(struct intel_gvt *gvt) { - memunmap(gvt->opregion.opregion_va); + free_pages((unsigned long)gvt->opregion.opregion_va, + get_order(INTEL_GVT_OPREGION_SIZE)); gvt->opregion.opregion_va = NULL; } @@ -152,17 +262,39 @@ void intel_gvt_clean_opregion(struct intel_gvt *gvt) */ int intel_gvt_init_opregion(struct intel_gvt *gvt) { + u8 *buf; + struct opregion_header *header; + struct vbt v; + gvt_dbg_core("init host opregion\n"); - pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION, - &gvt->opregion.opregion_pa); + gvt->opregion.opregion_va = (void 
*)__get_free_pages(GFP_KERNEL |
+			__GFP_ZERO,
+			get_order(INTEL_GVT_OPREGION_SIZE));
-	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
-			INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
 	if (!gvt->opregion.opregion_va) {
-		gvt_err("fail to map host opregion\n");
-		return -EFAULT;
+		gvt_err("fail to get memory for virt opregion\n");
+		return -ENOMEM;
 	}
+
+	/* emulated opregion with VBT mailbox only */
+	header = (struct opregion_header *)gvt->opregion.opregion_va;
+	memcpy(header->signature, OPREGION_SIGNATURE,
+			sizeof(OPREGION_SIGNATURE));
+	header->mboxes = MBOX_VBT;
+
+	/* for unknown reason, the value in LID field is incorrect
+	 * which block the windows guest, so workaround it by force
+	 * setting it to "OPEN"
+	 */
+	buf = (u8 *)gvt->opregion.opregion_va;
+	buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+	/* emulated vbt from virt vbt generation */
+	virt_vbt_generation(&v);
+	memcpy(gvt->opregion.opregion_va + INTEL_GVT_OPREGION_VBT_OFFSET,
+			&v, sizeof(struct vbt));
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 7d01c77a0f7ac..83f2f63d7eeba 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -51,6 +51,9 @@
 #define INTEL_GVT_OPREGION_PAGES 2
 #define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
+#define INTEL_GVT_OPREGION_VBT_OFFSET 0x400
+#define INTEL_GVT_OPREGION_VBT_SIZE \
+	(INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET)
 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)

From 54cff6479fd87e4a941e3686d93faa734586922b Mon Sep 17 00:00:00 2001
From: Zhi Wang
Date: Sun, 10 Sep 2017 16:40:04 +0800
Subject: [PATCH 02/50] drm/i915/gvt: Make elsp_dwords in the right order

The context descriptors in elsp_dwords are stored in a reversed order,
and the definition of the context descriptor is also reversed. The
reversed layout is hard to use and might cause misunderstanding. Put
them in the right order for the following code refactoring.

Tested on my SKL NUC.
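
For illustration only (a sketch, not part of the change itself), and
assuming the usual ELSP write sequence of descriptor 1 upper dword,
descriptor 1 lower dword, descriptor 0 upper dword, descriptor 0 lower
dword: storing each write at data[3 - index] leaves elsp_dwords.data[]
in plain descriptor order,

	/* sketch: layout of elsp_dwords.data[] after the four guest writes */
	data[0] = desc0.ldw;	data[1] = desc0.udw;
	data[2] = desc1.ldw;	data[3] = desc1.udw;

so get_desc_from_elsp_dwords(ed, i) simply returns descriptor i.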
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 22 +++++++++++----------- drivers/gpu/drm/i915/gvt/execlist.h | 8 ++++---- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 4427be18e4a93..9f02072051179 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -511,8 +511,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) if (!workload->emulate_schedule_in) return 0; - ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); - ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); + ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); + ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); if (!ret) @@ -770,21 +770,21 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) { struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; - struct execlist_ctx_descriptor_format desc[2]; + struct execlist_ctx_descriptor_format *desc[2]; int i, ret; - desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); - desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); + desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); + desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); - if (!desc[0].valid) { + if (!desc[0]->valid) { gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n"); goto inv_desc; } for (i = 0; i < ARRAY_SIZE(desc); i++) { - if (!desc[i].valid) + if (!desc[i]->valid) continue; - if (!desc[i].privilege_access) { + if (!desc[i]->privilege_access) { gvt_vgpu_err("unexpected GGTT elsp submission\n"); goto inv_desc; } @@ -792,9 +792,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) /* submit workload */ for (i = 0; i < ARRAY_SIZE(desc); i++) { - if (!desc[i].valid) + if (!desc[i]->valid) continue; - ret = submit_context(vgpu, ring_id, &desc[i], i == 0); + ret = submit_context(vgpu, ring_id, desc[i], i == 0); if (ret) { gvt_vgpu_err("failed to submit desc %d\n", i); return ret; @@ -805,7 +805,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) inv_desc: gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n", - desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw); + desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 7eced40a1e309..427e40e64d41e 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -36,10 +36,6 @@ #define _GVT_EXECLIST_H_ struct execlist_ctx_descriptor_format { - union { - u32 udw; - u32 context_id; - }; union { u32 ldw; struct { @@ -54,6 +50,10 @@ struct execlist_ctx_descriptor_format { u32 lrca : 20; }; }; + union { + u32 udw; + u32 context_id; + }; }; struct execlist_status_format { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index a5bed2e71b926..820d266705411 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1453,7 +1453,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist = &vgpu->execlist[ring_id]; - execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; + execlist->elsp_dwords.data[3 - 
execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { ret = intel_vgpu_submit_execlist(vgpu, ring_id); if(ret) From 874b6a910e6cc094629bd2634d14061cf5eb7690 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 20:08:18 +0800 Subject: [PATCH 03/50] drm/i915/gvt: Rename intel_vgpu_{init, clean}_gvt_context() To move workload related functions into scheduler.c, an expected way is to collect all the init/clean functions related to vGPU workload submission into fewer functions. Rename intel_vgpu_{init, clean}_gvt_context() for above usage in future. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 21 +++++++++++++++++++-- drivers/gpu/drm/i915/gvt/scheduler.h | 4 ++-- drivers/gpu/drm/i915/gvt/vgpu.c | 6 +++--- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 42cc61230ca7d..5913bcb7b73e5 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -709,12 +709,29 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) return ret; } -void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) +/** + * intel_vgpu_clean_submission - free submission-related resource for vGPU + * @vgpu: a vGPU + * + * This function is called when a vGPU is being destroyed. + * + */ +void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) { i915_gem_context_put(vgpu->shadow_ctx); } -int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) +/** + * intel_vgpu_setup_submission - setup submission-related resource for vGPU + * @vgpu: a vGPU + * + * This function is called when a vGPU is being created. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) { atomic_set(&vgpu->running_workload_num, 0); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 2d694f6c09076..c216aefaa73e1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -137,9 +137,9 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt); void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu); -int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu); +int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); -void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); +void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); #endif diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 02c61a1ad56a2..3d69871d28e9e 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -254,7 +254,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); - intel_vgpu_clean_gvt_context(vgpu); + intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_execlist(vgpu); intel_vgpu_clean_display(vgpu); intel_vgpu_clean_opregion(vgpu); @@ -376,7 +376,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_display; - ret = intel_vgpu_init_gvt_context(vgpu); + ret = intel_vgpu_setup_submission(vgpu); if (ret) goto out_clean_execlist; @@ -389,7 +389,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, return vgpu; out_clean_shadow_ctx: - intel_vgpu_clean_gvt_context(vgpu); + intel_vgpu_clean_submission(vgpu); out_clean_execlist: intel_vgpu_clean_execlist(vgpu); 
out_clean_display: From 9a9829e9eb8bc4b4e870ce15a8904a32991608d5 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 20:28:09 +0800 Subject: [PATCH 04/50] drm/i915/gvt: Move workload cache init/clean into intel_vgpu_{setup, clean}_submission() Move vGPU workload cache initialization/de-initialization into intel_vgpu_{setup, clean}_submission() since they are not specific to execlist stuffs. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 15 +-------------- drivers/gpu/drm/i915/gvt/scheduler.c | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 9f02072051179..0ee40a5ee1923 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -856,14 +856,12 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) struct intel_engine_cs *engine; clean_workloads(vgpu, ALL_ENGINES); - kmem_cache_destroy(vgpu->workloads); for_each_engine(engine, vgpu->gvt->dev_priv, i) { kfree(vgpu->reserve_ring_buffer_va[i]); vgpu->reserve_ring_buffer_va[i] = NULL; vgpu->reserve_ring_buffer_size[i] = 0; } - } #define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8) @@ -872,19 +870,8 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) enum intel_engine_id i; struct intel_engine_cs *engine; - /* each ring has a virtual execlist engine */ - for_each_engine(engine, vgpu->gvt->dev_priv, i) { + for_each_engine(engine, vgpu->gvt->dev_priv, i) init_vgpu_execlist(vgpu, i); - INIT_LIST_HEAD(&vgpu->workload_q_head[i]); - } - - vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", - sizeof(struct intel_vgpu_workload), 0, - SLAB_HWCACHE_ALIGN, - NULL); - - if (!vgpu->workloads) - return -ENOMEM; /* each ring has a shadow ring buffer until vgpu destroyed */ for_each_engine(engine, vgpu->gvt->dev_priv, i) { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 5913bcb7b73e5..81952139b00cc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -719,6 +719,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) { i915_gem_context_put(vgpu->shadow_ctx); + kmem_cache_destroy(vgpu->workloads); } /** @@ -733,7 +734,9 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) */ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) { - atomic_set(&vgpu->running_workload_num, 0); + enum intel_engine_id i; + struct intel_engine_cs *engine; + int ret; vgpu->shadow_ctx = i915_gem_context_create_gvt( &vgpu->gvt->dev_priv->drm); @@ -742,5 +745,24 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", + sizeof(struct intel_vgpu_workload), 0, + SLAB_HWCACHE_ALIGN, + NULL); + + if (!vgpu->workloads) { + ret = -ENOMEM; + goto out_shadow_ctx; + } + + for_each_engine(engine, vgpu->gvt->dev_priv, i) + INIT_LIST_HEAD(&vgpu->workload_q_head[i]); + + atomic_set(&vgpu->running_workload_num, 0); + return 0; + +out_shadow_ctx: + i915_gem_context_put(vgpu->shadow_ctx); + return ret; } From 1406a14b0ed977fc18f43398b391e4bb5d744174 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 21:15:18 +0800 Subject: [PATCH 05/50] drm/i915/gvt: Introduce intel_vgpu_submission Introduce intel_vgpu_submission to hold all members related to submission in struct intel_vgpu before. 
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 30 ++++++++----- drivers/gpu/drm/i915/gvt/gvt.h | 17 +++++--- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- drivers/gpu/drm/i915/gvt/render.c | 9 ++-- drivers/gpu/drm/i915/gvt/scheduler.c | 65 +++++++++++++++------------- drivers/gpu/drm/i915/gvt/scheduler.h | 2 +- drivers/gpu/drm/i915/gvt/vgpu.c | 4 +- 8 files changed, 75 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 0ee40a5ee1923..f118454d2eabb 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -362,7 +362,7 @@ static void free_workload(struct intel_vgpu_workload *workload) { intel_vgpu_unpin_mm(workload->shadow_mm); intel_gvt_mm_unreference(workload->shadow_mm); - kmem_cache_free(workload->vgpu->workloads, workload); + kmem_cache_free(workload->vgpu->submission.workloads, workload); } #define get_desc_from_elsp_dwords(ed, i) \ @@ -401,7 +401,8 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) struct intel_vgpu_workload, wa_ctx); int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; + struct intel_vgpu_submission *s = &workload->vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct drm_i915_gem_object *ctx_obj = shadow_ctx->engine[ring_id].state->obj; struct execlist_ring_context *shadow_ring_context; @@ -474,6 +475,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int prepare_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; struct execlist_ctx_descriptor_format ctx[2]; int ring_id = workload->ring_id; int ret; @@ -514,7 +516,7 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); - ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); + ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx); if (!ret) goto out; else @@ -533,7 +535,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; int ring_id = workload->ring_id; - struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; struct intel_vgpu_workload *next_workload; struct list_head *next = workload_q_head(vgpu, ring_id)->next; bool lite_restore = false; @@ -652,6 +655,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, struct execlist_ctx_descriptor_format *desc, bool emulate_schedule_in) { + struct intel_vgpu_submission *s = &vgpu->submission; struct list_head *q = workload_q_head(vgpu, ring_id); struct intel_vgpu_workload *last_workload = get_last_workload(q); struct intel_vgpu_workload *workload = NULL; @@ -689,7 +693,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, gvt_dbg_el("ring id %d begin a new workload\n", ring_id); - workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL); + workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); if (!workload) return -ENOMEM; @@ -738,7 +742,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, } if (emulate_schedule_in) - workload->elsp_dwords = 
vgpu->execlist[ring_id].elsp_dwords; + workload->elsp_dwords = s->execlist[ring_id].elsp_dwords; gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", workload, ring_id, head, tail, start, ctl); @@ -748,7 +752,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, ret = prepare_mm(workload); if (ret) { - kmem_cache_free(vgpu->workloads, workload); + kmem_cache_free(s->workloads, workload); return ret; } @@ -769,7 +773,8 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) { - struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; struct execlist_ctx_descriptor_format *desc[2]; int i, ret; @@ -811,7 +816,8 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) { - struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; struct execlist_context_status_pointer_format ctx_status_ptr; u32 ctx_status_ptr_reg; @@ -833,6 +839,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) { + struct intel_vgpu_submission *s = &vgpu->submission; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; @@ -841,12 +848,11 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) /* free the unsubmited workloads in the queues. */ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { list_for_each_entry_safe(pos, n, - &vgpu->workload_q_head[engine->id], list) { + &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); free_workload(pos); } - - clear_bit(engine->id, vgpu->shadow_ctx_desc_updated); + clear_bit(engine->id, s->shadow_ctx_desc_updated); } } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 9c2e7c0aa38fb..c3f84f26090aa 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -142,6 +142,15 @@ struct vgpu_sched_ctl { int weight; }; +struct intel_vgpu_submission { + struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; + struct list_head workload_q_head[I915_NUM_ENGINES]; + struct kmem_cache *workloads; + atomic_t running_workload_num; + struct i915_gem_context *shadow_ctx; + DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); +}; + struct intel_vgpu { struct intel_gvt *gvt; int id; @@ -161,16 +170,12 @@ struct intel_vgpu { struct intel_vgpu_gtt gtt; struct intel_vgpu_opregion opregion; struct intel_vgpu_display display; - struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; - struct list_head workload_q_head[I915_NUM_ENGINES]; - struct kmem_cache *workloads; - atomic_t running_workload_num; + struct intel_vgpu_submission submission; /* 1/2K for each reserve ring buffer */ void *reserve_ring_buffer_va[I915_NUM_ENGINES]; int reserve_ring_buffer_size[I915_NUM_ENGINES]; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); - struct i915_gem_context *shadow_ctx; - DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); + #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 
820d266705411..00893532394ac 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1451,7 +1451,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1)) return -EINVAL; - execlist = &vgpu->execlist[ring_id]; + execlist = &vgpu->submission.execlist[ring_id]; execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 96060920a6fea..fc6878bb24968 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1188,7 +1188,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr, struct intel_vgpu *vgpu = (struct intel_vgpu *) mdev_get_drvdata(mdev); return sprintf(buf, "%u\n", - vgpu->shadow_ctx->hw_id); + vgpu->submission.shadow_ctx->hw_id); } return sprintf(buf, "\n"); } diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 6d066cf354789..db4d091be60b7 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -261,14 +261,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct render_mmio *mmio; - u32 v; - int i, array_size; - u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state; + struct intel_vgpu_submission *s = &vgpu->submission; + u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state; u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL]; u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); i915_reg_t last_reg = _MMIO(0); + struct render_mmio *mmio; + u32 v; + int i, array_size; if (IS_SKYLAKE(vgpu->gvt->dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 81952139b00cc..864a2bc06e45c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; + struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx; struct drm_i915_gem_object *ctx_obj = shadow_ctx->engine[ring_id].state->obj; struct execlist_ring_context *shadow_ring_context; @@ -249,12 +249,13 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) */ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; - struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct drm_i915_gem_request *rq; - struct intel_vgpu *vgpu = workload->vgpu; struct intel_ring *ring; int ret; @@ -267,7 +268,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if 
(!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated)) + if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated)) shadow_context_descriptor_update(shadow_ctx, dev_priv->engine[ring_id]); @@ -326,9 +327,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) static int dispatch_workload(struct intel_vgpu_workload *workload) { + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; - struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[ring_id]; int ret = 0; @@ -414,7 +417,7 @@ static struct intel_vgpu_workload *pick_next_workload( gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload); - atomic_inc(&workload->vgpu->running_workload_num); + atomic_inc(&workload->vgpu->submission.running_workload_num); out: mutex_unlock(&gvt->lock); return workload; @@ -424,8 +427,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_gem_object *ctx_obj = shadow_ctx->engine[ring_id].state->obj; struct execlist_ring_context *shadow_ring_context; @@ -491,15 +495,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload) static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct intel_vgpu_workload *workload; - struct intel_vgpu *vgpu; + struct intel_vgpu_workload *workload = + scheduler->current_workload[ring_id]; + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; int event; mutex_lock(&gvt->lock); - workload = scheduler->current_workload[ring_id]; - vgpu = workload->vgpu; - /* For the workload w/ request, needs to wait for the context * switch to make sure request is completed. * For the workload w/o request, directly complete the workload. 
@@ -536,7 +539,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) } mutex_lock(&dev_priv->drm.struct_mutex); /* unpin shadow ctx as the shadow_ctx update is done */ - engine->context_unpin(engine, workload->vgpu->shadow_ctx); + engine->context_unpin(engine, s->shadow_ctx); mutex_unlock(&dev_priv->drm.struct_mutex); } @@ -548,7 +551,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); workload->complete(workload); - atomic_dec(&vgpu->running_workload_num); + atomic_dec(&s->running_workload_num); wake_up(&scheduler->workload_complete_wq); if (gvt->scheduler.need_reschedule) @@ -637,14 +640,15 @@ static int workload_thread(void *priv) void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) { + struct intel_vgpu_submission *s = &vgpu->submission; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - if (atomic_read(&vgpu->running_workload_num)) { + if (atomic_read(&s->running_workload_num)) { gvt_dbg_sched("wait vgpu idle\n"); wait_event(scheduler->workload_complete_wq, - !atomic_read(&vgpu->running_workload_num)); + !atomic_read(&s->running_workload_num)); } } @@ -718,8 +722,10 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) */ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) { - i915_gem_context_put(vgpu->shadow_ctx); - kmem_cache_destroy(vgpu->workloads); + struct intel_vgpu_submission *s = &vgpu->submission; + + i915_gem_context_put(s->shadow_ctx); + kmem_cache_destroy(s->workloads); } /** @@ -734,35 +740,36 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) */ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) { + struct intel_vgpu_submission *s = &vgpu->submission; enum intel_engine_id i; struct intel_engine_cs *engine; int ret; - vgpu->shadow_ctx = i915_gem_context_create_gvt( + s->shadow_ctx = i915_gem_context_create_gvt( &vgpu->gvt->dev_priv->drm); - if (IS_ERR(vgpu->shadow_ctx)) - return PTR_ERR(vgpu->shadow_ctx); + if (IS_ERR(s->shadow_ctx)) + return PTR_ERR(s->shadow_ctx); - bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); + bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); - vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", + s->workloads = kmem_cache_create("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, NULL); - if (!vgpu->workloads) { + if (!s->workloads) { ret = -ENOMEM; goto out_shadow_ctx; } for_each_engine(engine, vgpu->gvt->dev_priv, i) - INIT_LIST_HEAD(&vgpu->workload_q_head[i]); + INIT_LIST_HEAD(&s->workload_q_head[i]); - atomic_set(&vgpu->running_workload_num, 0); + atomic_set(&s->running_workload_num, 0); return 0; out_shadow_ctx: - i915_gem_context_put(vgpu->shadow_ctx); + i915_gem_context_put(s->shadow_ctx); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index c216aefaa73e1..3ca0087f10b20 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -122,7 +122,7 @@ struct intel_shadow_bb_entry { }; #define workload_q_head(vgpu, ring_id) \ - (&(vgpu->workload_q_head[ring_id])) + (&(vgpu->submission.workload_q_head[ring_id])) #define queue_workload(workload) do { \ list_add_tail(&workload->list, \ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 3d69871d28e9e..35a5ec2012062 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -226,7 +226,7 @@ void 
intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) vgpu->active = false; - if (atomic_read(&vgpu->running_workload_num)) { + if (atomic_read(&vgpu->submission.running_workload_num)) { mutex_unlock(&gvt->lock); intel_gvt_wait_vgpu_idle(vgpu); mutex_lock(&gvt->lock); @@ -293,7 +293,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) vgpu->gvt = gvt; for (i = 0; i < I915_NUM_ENGINES; i++) - INIT_LIST_HEAD(&vgpu->workload_q_head[i]); + INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]); ret = intel_vgpu_init_sched_policy(vgpu); if (ret) From 91d5d85442b2a65e5f4e1726565c1c1a8ba9976f Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 21:33:20 +0800 Subject: [PATCH 06/50] drm/i915/gvt: Move tlb_handle_pending into intel_vgpu_submission Move tlb_handle_pending into intel_vgpu_submssion since it belongs to a part of vGPU submission stuffs Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/gvt/render.c | 3 ++- drivers/gpu/drm/i915/gvt/scheduler.c | 1 + drivers/gpu/drm/i915/gvt/vgpu.c | 1 - 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index c3f84f26090aa..93ff530eee303 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -149,6 +149,7 @@ struct intel_vgpu_submission { atomic_t running_workload_num; struct i915_gem_context *shadow_ctx; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); + DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); }; struct intel_vgpu { @@ -174,7 +175,6 @@ struct intel_vgpu { /* 1/2K for each reserve ring buffer */ void *reserve_ring_buffer_va[I915_NUM_ENGINES]; int reserve_ring_buffer_size[I915_NUM_ENGINES]; - DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 00893532394ac..c78e45058219d 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1526,7 +1526,7 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, default: return -EINVAL; } - set_bit(id, (void *)vgpu->tlb_handle_pending); + set_bit(id, (void *)vgpu->submission.tlb_handle_pending); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index db4d091be60b7..e16c3551b4a3e 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -147,6 +147,7 @@ static u32 gen9_render_mocs_L3[32]; static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_vgpu_submission *s = &vgpu->submission; enum forcewake_domains fw; i915_reg_t reg; u32 regs[] = { @@ -160,7 +161,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending)) + if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending)) return; reg = _MMIO(regs[ring_id]); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 864a2bc06e45c..7cb1cf4223ed4 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -766,6 +766,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&s->workload_q_head[i]); atomic_set(&s->running_workload_num, 0); + 
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 35a5ec2012062..1c9818d9f1d85 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -346,7 +346,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
-	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);

From bf4097ea5762bd90d836df9904594eb4822fefa7 Mon Sep 17 00:00:00 2001
From: Zhi Wang
Date: Sun, 10 Sep 2017 21:36:21 +0800
Subject: [PATCH 07/50] drm/i915/gvt: Fix a memory leak in cmd_parser.c

The pointer that points to the original memory must never directly take
the return value of krealloc(): if krealloc() fails, the original
allocation would be leaked because no reference to it remains.

Signed-off-by: Zhi Wang
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 701a3c6f16696..9405a214760c8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2620,14 +2620,16 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_top = workload->rb_start + guest_rb_size;
 	if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
-		void *va = vgpu->reserve_ring_buffer_va[ring_id];
+		void *va, *p;
+
 		/* realloc the new ring buffer if needed */
-		vgpu->reserve_ring_buffer_va[ring_id] =
-			krealloc(va, workload->rb_len, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[ring_id]) {
+		va = vgpu->reserve_ring_buffer_va[ring_id];
+		p = krealloc(va, workload->rb_len, GFP_KERNEL);
+		if (!p) {
 			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
 			return -ENOMEM;
 		}
+		vgpu->reserve_ring_buffer_va[ring_id] = p;
 		vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
 	}

From 8cf80a2e4b313d1aba7d10ce6ebfbb44a119c66c Mon Sep 17 00:00:00 2001
From: Zhi Wang
Date: Sun, 10 Sep 2017 21:46:06 +0800
Subject: [PATCH 08/50] drm/i915/gvt: Rename reserved ring buffer

"Reserved" implies reserving something from somewhere, while these are
actually buffers used by the command scanner. Rename them to
ring_scan_buffer.

v2:
- Remove the usage of an extra variable.
(Zhenyu) Fixes: 0a53bc07f044 ("drm/i915/gvt: Separate cmd scan from request allocation") Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 16 ++++++++-------- drivers/gpu/drm/i915/gvt/execlist.c | 22 +++++++++++----------- drivers/gpu/drm/i915/gvt/gvt.h | 6 +++--- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 9405a214760c8..a2b970fb3c6fd 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2619,21 +2619,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_tail = workload->rb_start + workload->rb_tail; gma_top = workload->rb_start + guest_rb_size; - if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) { - void *va, *p; + if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) { + void *p; /* realloc the new ring buffer if needed */ - va = vgpu->reserve_ring_buffer_va[ring_id]; - p = krealloc(va, workload->rb_len, GFP_KERNEL); + p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len, + GFP_KERNEL); if (!p) { - gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); return -ENOMEM; } - vgpu->reserve_ring_buffer_va[ring_id] = p; - vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len; + vgpu->ring_scan_buffer[ring_id] = p; + vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len; } - shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id]; + shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id]; /* get shadow ring buffer va */ workload->shadow_ring_buffer_va = shadow_ring_buffer_va; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f118454d2eabb..5e4f35f9e290a 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -864,9 +864,9 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) clean_workloads(vgpu, ALL_ENGINES); for_each_engine(engine, vgpu->gvt->dev_priv, i) { - kfree(vgpu->reserve_ring_buffer_va[i]); - vgpu->reserve_ring_buffer_va[i] = NULL; - vgpu->reserve_ring_buffer_size[i] = 0; + kfree(vgpu->ring_scan_buffer[i]); + vgpu->ring_scan_buffer[i] = NULL; + vgpu->ring_scan_buffer_size[i] = 0; } } @@ -881,21 +881,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) /* each ring has a shadow ring buffer until vgpu destroyed */ for_each_engine(engine, vgpu->gvt->dev_priv, i) { - vgpu->reserve_ring_buffer_va[i] = + vgpu->ring_scan_buffer[i] = kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL); - if (!vgpu->reserve_ring_buffer_va[i]) { - gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + if (!vgpu->ring_scan_buffer[i]) { + gvt_vgpu_err("fail to alloc ring scan buffer\n"); goto out; } - vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; + vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; } return 0; out: for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (vgpu->reserve_ring_buffer_size[i]) { - kfree(vgpu->reserve_ring_buffer_va[i]); - vgpu->reserve_ring_buffer_va[i] = NULL; - vgpu->reserve_ring_buffer_size[i] = 0; + if (vgpu->ring_scan_buffer_size[i]) { + kfree(vgpu->ring_scan_buffer[i]); + vgpu->ring_scan_buffer[i] = NULL; + vgpu->ring_scan_buffer_size[i] = 0; } } return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 93ff530eee303..5b723fa00fcb5 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -172,9 +172,9 @@ struct 
intel_vgpu { struct intel_vgpu_opregion opregion; struct intel_vgpu_display display; struct intel_vgpu_submission submission; - /* 1/2K for each reserve ring buffer */ - void *reserve_ring_buffer_va[I915_NUM_ENGINES]; - int reserve_ring_buffer_size[I915_NUM_ENGINES]; + /* 1/2K for each engine */ + void *ring_scan_buffer[I915_NUM_ENGINES]; + int ring_scan_buffer_size[I915_NUM_ENGINES]; #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) From 325eb94a33451db138e8da7393ad8e9a0e22dd18 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 21:58:11 +0800 Subject: [PATCH 09/50] drm/i915/gvt: Move ring scan buffers into intel_vgpu_submission Move ring scan buffers into intel_vgpu_submission since they belongs to a part of vGPU submission stuffs. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 11 ++++++----- drivers/gpu/drm/i915/gvt/execlist.c | 23 +++++++++++++---------- drivers/gpu/drm/i915/gvt/gvt.h | 7 +++---- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a2b970fb3c6fd..5b033c950a31f 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2604,6 +2604,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; unsigned long gma_head, gma_tail, gma_top, guest_rb_size; void *shadow_ring_buffer_va; int ring_id = workload->ring_id; @@ -2619,21 +2620,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_tail = workload->rb_start + workload->rb_tail; gma_top = workload->rb_start + guest_rb_size; - if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) { + if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) { void *p; /* realloc the new ring buffer if needed */ - p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len, + p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len, GFP_KERNEL); if (!p) { gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); return -ENOMEM; } - vgpu->ring_scan_buffer[ring_id] = p; - vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len; + s->ring_scan_buffer[ring_id] = p; + s->ring_scan_buffer_size[ring_id] = workload->rb_len; } - shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id]; + shadow_ring_buffer_va = s->ring_scan_buffer[ring_id]; /* get shadow ring buffer va */ workload->shadow_ring_buffer_va = shadow_ring_buffer_va; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 5e4f35f9e290a..7c7ab63a7bb8a 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -864,15 +864,18 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) clean_workloads(vgpu, ALL_ENGINES); for_each_engine(engine, vgpu->gvt->dev_priv, i) { - kfree(vgpu->ring_scan_buffer[i]); - vgpu->ring_scan_buffer[i] = NULL; - vgpu->ring_scan_buffer_size[i] = 0; + struct intel_vgpu_submission *s = &vgpu->submission; + + kfree(s->ring_scan_buffer[i]); + s->ring_scan_buffer[i] = NULL; + s->ring_scan_buffer_size[i] = 0; } } #define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8) int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) { + struct intel_vgpu_submission *s = &vgpu->submission; enum intel_engine_id i; struct intel_engine_cs *engine; @@ -881,21 +884,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) /* each ring has a shadow 
ring buffer until vgpu destroyed */ for_each_engine(engine, vgpu->gvt->dev_priv, i) { - vgpu->ring_scan_buffer[i] = + s->ring_scan_buffer[i] = kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL); - if (!vgpu->ring_scan_buffer[i]) { + if (!s->ring_scan_buffer[i]) { gvt_vgpu_err("fail to alloc ring scan buffer\n"); goto out; } - vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; + s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; } return 0; out: for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (vgpu->ring_scan_buffer_size[i]) { - kfree(vgpu->ring_scan_buffer[i]); - vgpu->ring_scan_buffer[i] = NULL; - vgpu->ring_scan_buffer_size[i] = 0; + if (s->ring_scan_buffer_size[i]) { + kfree(s->ring_scan_buffer[i]); + s->ring_scan_buffer[i] = NULL; + s->ring_scan_buffer_size[i] = 0; } } return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 5b723fa00fcb5..49fe548700003 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -150,6 +150,9 @@ struct intel_vgpu_submission { struct i915_gem_context *shadow_ctx; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); + /* 1/2K for each engine */ + void *ring_scan_buffer[I915_NUM_ENGINES]; + int ring_scan_buffer_size[I915_NUM_ENGINES]; }; struct intel_vgpu { @@ -172,10 +175,6 @@ struct intel_vgpu { struct intel_vgpu_opregion opregion; struct intel_vgpu_display display; struct intel_vgpu_submission submission; - /* 1/2K for each engine */ - void *ring_scan_buffer[I915_NUM_ENGINES]; - int ring_scan_buffer_size[I915_NUM_ENGINES]; - #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { From 8652a8aca614cc2576cf5d4c73d3b87e871ce004 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 10 Sep 2017 22:01:10 +0800 Subject: [PATCH 10/50] drm/i915/gvt: Do not allocate initial ring scan buffer Theoretically, the largest bulk of commands in the ring buffer of an engine might be the first submission, which usually contains a lot of commands to initialize the HW. After removing the initial allocation of the ring scan buffer and let krealloc() do everything we need, we still have a big chance to get the buffer of suitable size in the first submission. Tested on my SKL NUC. 
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 21 --------------------- drivers/gpu/drm/i915/gvt/gvt.h | 1 - 2 files changed, 22 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 7c7ab63a7bb8a..adf7668f8cc1c 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -872,36 +872,15 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) } } -#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8) int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) { - struct intel_vgpu_submission *s = &vgpu->submission; enum intel_engine_id i; struct intel_engine_cs *engine; for_each_engine(engine, vgpu->gvt->dev_priv, i) init_vgpu_execlist(vgpu, i); - /* each ring has a shadow ring buffer until vgpu destroyed */ - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - s->ring_scan_buffer[i] = - kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL); - if (!s->ring_scan_buffer[i]) { - gvt_vgpu_err("fail to alloc ring scan buffer\n"); - goto out; - } - s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; - } return 0; -out: - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (s->ring_scan_buffer_size[i]) { - kfree(s->ring_scan_buffer[i]); - s->ring_scan_buffer[i] = NULL; - s->ring_scan_buffer_size[i] = 0; - } - } - return -ENOMEM; } void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 49fe548700003..7a770b1c99d6a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -150,7 +150,6 @@ struct intel_vgpu_submission { struct i915_gem_context *shadow_ctx; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); - /* 1/2K for each engine */ void *ring_scan_buffer[I915_NUM_ENGINES]; int ring_scan_buffer_size[I915_NUM_ENGINES]; }; From 5c56883a9531cd89561fb9a11a33697f2847c82a Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 20 Sep 2017 05:36:47 +0800 Subject: [PATCH 11/50] drm/i915/gvt: Change the return type during command scan Generally, there are 3 types of errors during command scan: a) some commands might be unknown with EBADRQC; b) some cmd access invalid address with EFAULT; c) some unexpected force nonpriv cmd with EPERM. later the healthy state can be judged through the return error. v2: - remove some internal i915 errors rating. (Zhenyu) v3: - the healthy state is judged through the internal defined return error. (Zhenyu) - force non priv cmd error can be ignored. (Kevin) v4: - reuse standard defined errno instead of recreate, e.g EBADRQC for unknown cmd, EFAULT for invalid address, EPERM for nonpriv. (Zhenyu) v5: - remove some irrelevant code for the patch. - fix typo of vgpu_is_vm_unhealthy. (Zhenyu) v6: - move the healthy check and failsafe code into another patch. (Zhenyu) v7: - polish title and commit message. 
(Zhenyu) Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 78 ++++++++++++++++++--------- drivers/gpu/drm/i915/gvt/scheduler.c | 2 +- 2 files changed, 54 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 5b033c950a31f..885cccf708ac7 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -825,7 +825,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s, if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) { gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", offset, data); - return -EINVAL; + return -EPERM; } return 0; } @@ -839,7 +839,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, if (offset + 4 > gvt->device_info.mmio_size) { gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", cmd, offset); - return -EINVAL; + return -EFAULT; } if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { @@ -854,8 +854,8 @@ static int cmd_reg_handler(struct parser_exec_state *s, } if (is_force_nonpriv_mmio(offset) && - force_nonpriv_reg_handler(s, offset, index)) - return -EINVAL; + force_nonpriv_reg_handler(s, offset, index)) + return -EPERM; if (offset == i915_mmio_reg_offset(DERRMR) || offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { @@ -894,11 +894,14 @@ static int cmd_handler_lri(struct parser_exec_state *s) i915_mmio_reg_offset(DERRMR)) ret |= 0; else - ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0; + ret |= (cmd_reg_inhibit(s, i)) ? + -EBADRQC : 0; } if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); + if (ret) + break; } return ret; } @@ -912,11 +915,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s) if (IS_BROADWELL(s->vgpu->gvt->dev_priv)) ret |= ((cmd_reg_inhibit(s, i) || (cmd_reg_inhibit(s, i + 1)))) ? - -EINVAL : 0; + -EBADRQC : 0; if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src"); + if (ret) + break; ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst"); + if (ret) + break; } return ret; } @@ -934,15 +941,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s) for (i = 1; i < cmd_len;) { if (IS_BROADWELL(gvt->dev_priv)) - ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0; + ret |= (cmd_reg_inhibit(s, i)) ? 
-EBADRQC : 0; if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm"); + if (ret) + break; if (cmd_val(s, 0) & (1 << 22)) { gma = cmd_gma(s, i + 1); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, i + 2)) << 32; ret |= cmd_address_audit(s, gma, sizeof(u32), false); + if (ret) + break; } i += gmadr_dw_number(s) + 1; } @@ -958,11 +969,15 @@ static int cmd_handler_srm(struct parser_exec_state *s) for (i = 1; i < cmd_len;) { ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm"); + if (ret) + break; if (cmd_val(s, 0) & (1 << 22)) { gma = cmd_gma(s, i + 1); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, i + 2)) << 32; ret |= cmd_address_audit(s, gma, sizeof(u32), false); + if (ret) + break; } i += gmadr_dw_number(s) + 1; } @@ -1116,7 +1131,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, v = (dword0 & GENMASK(21, 19)) >> 19; if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code))) - return -EINVAL; + return -EBADRQC; info->pipe = gen8_plane_code[v].pipe; info->plane = gen8_plane_code[v].plane; @@ -1136,7 +1151,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, info->surf_reg = SPRSURF(info->pipe); } else { WARN_ON(1); - return -EINVAL; + return -EBADRQC; } return 0; } @@ -1185,7 +1200,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, default: gvt_vgpu_err("unknown plane code %d\n", plane); - return -EINVAL; + return -EBADRQC; } info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; @@ -1348,10 +1363,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index) { unsigned long addr; unsigned long gma_high, gma_low; - int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + struct intel_vgpu *vgpu = s->vgpu; + int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd; - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) + if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) { + gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes); return INTEL_GVT_INVALID_ADDR; + } gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK; if (gmadr_bytes == 4) { @@ -1374,16 +1392,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s, if (op_size > max_surface_size) { gvt_vgpu_err("command address audit fail name %s\n", s->info->name); - return -EINVAL; + return -EFAULT; } if (index_mode) { if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) { - ret = -EINVAL; + ret = -EFAULT; goto err; } } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) { - ret = -EINVAL; + ret = -EFAULT; goto err; } @@ -1439,7 +1457,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s) gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); - return -EINVAL; + return -EBADRQC; } static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s) @@ -1588,22 +1606,26 @@ static int find_bb_size(struct parser_exec_state *s) /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); + if (gma == INTEL_GVT_INVALID_ADDR) + return -EFAULT; + cmd = cmd_val(s, 0); info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); - return -EINVAL; + return -EBADRQC; } do { - copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm, - gma, gma + 4, &cmd); + if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm, + gma, gma + 4, &cmd) < 0) + return -EFAULT; info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); - return 
-EINVAL; + return -EBADRQC; } if (info->opcode == OP_MI_BATCH_BUFFER_END) { @@ -1634,11 +1656,13 @@ static int perform_bb_shadow(struct parser_exec_state *s) /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); + if (gma == INTEL_GVT_INVALID_ADDR) + return -EFAULT; /* get the size of the batch buffer */ bb_size = find_bb_size(s); if (bb_size < 0) - return -EINVAL; + return bb_size; /* allocate shadow batch buffer */ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL); @@ -1710,13 +1734,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); - return -EINVAL; + return -EFAULT; } second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); - return -EINVAL; + return -EFAULT; } s->saved_buf_addr_type = s->buf_addr_type; @@ -2430,7 +2454,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); - return -EINVAL; + return -EBADRQC; } s->info = info; @@ -2465,6 +2489,10 @@ static inline bool gma_out_of_range(unsigned long gma, return (gma > gma_tail) && (gma < gma_head); } +/* Keep the consistent return type, e.g EBADRQC for unknown + * cmd, EFAULT for invalid address, EPERM for nonpriv. later + * works as the input of VM healthy status. + */ static int command_scan(struct parser_exec_state *s, unsigned long rb_head, unsigned long rb_tail, unsigned long rb_start, unsigned long rb_len) @@ -2487,7 +2515,7 @@ static int command_scan(struct parser_exec_state *s, s->ip_gma, rb_start, gma_bottom); parser_exec_state_dump(s); - return -EINVAL; + return -EFAULT; } if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { gvt_vgpu_err("ip_gma %lx out of range." diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 7cb1cf4223ed4..0771b715f825c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Invalid guest context descriptor\n"); - return -EINVAL; + return -EFAULT; } page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); From e011c6ce2b4fc7c577ade41485d74431a4e6ea1a Mon Sep 17 00:00:00 2001 From: fred gao Date: Tue, 19 Sep 2017 15:11:28 +0800 Subject: [PATCH 12/50] drm/i915/gvt: Add VM healthy check for workload_thread When a scan error occurs in dispatch_workload, this patch is to check the healthy state and free all the queued workloads before the failsafe mode is entered. 
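Not part of the patch, just a small user-space sketch of the policy this series converges on: scan errors that indicate guest misbehaviour (-EBADRQC for a bad command, -EFAULT for a bad address) push the vGPU towards failsafe mode, while other errors remain host-internal. vm_is_unhealthy() and handle_scan_result() are illustrative names only, standing in for vgpu_is_vm_unhealthy() and the workload_thread error path.

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* -EBADRQC: malformed/unknown command, -EFAULT: invalid guest address */
  static bool vm_is_unhealthy(int ret)
  {
      return ret == -EBADRQC || ret == -EFAULT;
  }

  static void handle_scan_result(int ret)
  {
      if (ret == 0) {
          printf("workload ok, submit it\n");
      } else if (vm_is_unhealthy(ret)) {
          /* guest misbehaved: drop queued workloads, enter failsafe mode */
          printf("guest error %d: clean execlist, enter failsafe\n", ret);
      } else {
          /* host-side error, e.g. -ENOMEM: fail the dispatch, no failsafe */
          printf("host error %d: fail the dispatch\n", ret);
      }
  }

  int main(void)
  {
      handle_scan_result(0);
      handle_scan_result(-EBADRQC);
      handle_scan_result(-ENOMEM);
      return 0;
  }

The key design point is that the return value itself carries the "who is at fault" information, so callers above the scanner do not need extra state to decide between failsafe mode and a plain dispatch failure.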
Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 6 ++++++ drivers/gpu/drm/i915/gvt/handlers.c | 4 +++- drivers/gpu/drm/i915/gvt/scheduler.c | 7 +++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 7a770b1c99d6a..be1bb6dbcbd27 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -193,6 +193,10 @@ struct intel_vgpu { #endif }; +/* validating GM healthy status*/ +#define vgpu_is_vm_unhealthy(ret_val) \ + (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT)) + struct intel_gvt_gm { unsigned long vgpu_allocated_low_gm_size; unsigned long vgpu_allocated_high_gm_size; @@ -497,6 +501,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); +void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); struct intel_gvt_ops { int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *, @@ -519,6 +524,7 @@ struct intel_gvt_ops { enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, GVT_FAILSAFE_INSUFFICIENT_RESOURCE, + GVT_FAILSAFE_GUEST_ERR, }; static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c78e45058219d..acc1cf4fa6f53 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -157,7 +157,7 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) -static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) +void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) { switch (reason) { case GVT_FAILSAFE_UNSUPPORTED_GUEST: @@ -165,6 +165,8 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) break; case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: pr_err("Graphics resource is not enough for the guest\n"); + case GVT_FAILSAFE_GUEST_ERR: + pr_err("GVT Internal error for the guest\n"); default: break; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 0771b715f825c..02af140233836 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -634,6 +634,13 @@ static int workload_thread(void *priv) FORCEWAKE_ALL); intel_runtime_pm_put(gvt->dev_priv); + if (ret && (vgpu_is_vm_unhealthy(ret))) { + mutex_lock(&gvt->lock); + intel_vgpu_clean_execlist(vgpu); + mutex_unlock(&gvt->lock); + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + } + } return 0; } From c9214008b52b20ba2a88dad6ba92f1a19c533db6 Mon Sep 17 00:00:00 2001 From: fred gao Date: Tue, 19 Sep 2017 15:11:29 +0800 Subject: [PATCH 13/50] drm/i915/gvt: Add VM healthy check for submit_context When a scan error occurs in submit_context, this patch is to decrease the mm ref count and free the workload struct before the workload is abandoned. v2: - submit_context related code should be combined together. (Zhenyu) v3: - free all the unsubmitted workloads. (Zhenyu) v4: - refine the clean path. (Zhenyu) v5: - polish the title. 
(Zhenyu) Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index adf7668f8cc1c..1347f61fd7091 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -360,7 +360,6 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, static void free_workload(struct intel_vgpu_workload *workload) { - intel_vgpu_unpin_mm(workload->shadow_mm); intel_gvt_mm_unreference(workload->shadow_mm); kmem_cache_free(workload->vgpu->submission.workloads, workload); } @@ -540,7 +539,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) struct intel_vgpu_workload *next_workload; struct list_head *next = workload_q_head(vgpu, ring_id)->next; bool lite_restore = false; - int ret; + int ret = 0; gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); @@ -581,17 +580,12 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) if (lite_restore) { gvt_dbg_el("next context == current - no schedule-out\n"); - free_workload(workload); - return 0; + goto out; } ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc); - if (ret) - goto err; out: - free_workload(workload); - return 0; -err: + intel_vgpu_unpin_mm(workload->shadow_mm); free_workload(workload); return ret; } @@ -762,13 +756,22 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, if (list_empty(workload_q_head(vgpu, ring_id))) { intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); - intel_gvt_scan_and_shadow_workload(workload); + ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); } - queue_workload(workload); - return 0; + if (ret == 0) + queue_workload(workload); + else { + free_workload(workload); + if (vgpu_is_vm_unhealthy(ret)) { + intel_vgpu_clean_execlist(vgpu); + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + } + } + return ret; + } int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) From 24f8a29af4afe7c53e08f4afa0c3fa9eb3791b89 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 19 Sep 2017 16:55:34 +0100 Subject: [PATCH 14/50] drm/i915/gvt: ensure -ve return value is handled correctly An earlier fix changed the return type from find_bb_size however the integer return is being assigned to a unsigned int so the -ve error check will never be detected. Make bb_size an int to fix this. 
Detected by CoverityScan CID#1456886 ("Unsigned compared against 0") Fixes: 1e3197d6ad73 ("drm/i915/gvt: Refine error handling for perform_bb_shadow") Signed-off-by: Colin Ian King Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 885cccf708ac7..cd2681a23fcd5 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1650,7 +1650,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) struct intel_shadow_bb_entry *entry_obj; struct intel_vgpu *vgpu = s->vgpu; unsigned long gma = 0; - uint32_t bb_size; + int bb_size; void *dst = NULL; int ret = 0; From a34e8def4d9152123582a37a3660c2092e37ceed Mon Sep 17 00:00:00 2001 From: Shuo Liu Date: Thu, 21 Sep 2017 08:02:31 +0800 Subject: [PATCH 15/50] drm/i915/gvt: Use dyndbg for gvt debug info It's better enable/disable and classify gvt debug info dynamically. This patch change it to dyndbg so can be dynamically enable/disable each item. All gvt log can be enabled by, $ echo 'file *gvt* +p' > /sys/kernel/debug/dynamic_debug/control Signed-off-by: Shuo Liu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/debug.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h index b0cff4dc26847..d98d5a0b928c7 100644 --- a/drivers/gpu/drm/i915/gvt/debug.h +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -25,41 +25,41 @@ #define __GVT_DEBUG_H__ #define gvt_err(fmt, args...) \ - DRM_ERROR("gvt: "fmt, ##args) + pr_err("gvt: "fmt, ##args) #define gvt_vgpu_err(fmt, args...) \ do { \ if (IS_ERR_OR_NULL(vgpu)) \ - DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \ + pr_debug("gvt: "fmt, ##args); \ else \ - DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ + pr_debug("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ } while (0) #define gvt_dbg_core(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) + pr_debug("gvt: core: "fmt, ##args) #define gvt_dbg_irq(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args) + pr_debug("gvt: irq: "fmt, ##args) #define gvt_dbg_mm(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args) + pr_debug("gvt: mm: "fmt, ##args) #define gvt_dbg_mmio(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args) + pr_debug("gvt: mmio: "fmt, ##args) #define gvt_dbg_dpy(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args) + pr_debug("gvt: dpy: "fmt, ##args) #define gvt_dbg_el(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args) + pr_debug("gvt: el: "fmt, ##args) #define gvt_dbg_sched(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args) + pr_debug("gvt: sched: "fmt, ##args) #define gvt_dbg_render(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args) + pr_debug("gvt: render: "fmt, ##args) #define gvt_dbg_cmd(fmt, args...) \ - DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args) + pr_debug("gvt: cmd: "fmt, ##args) #endif From 21527a8dafc40fc499ae57492c1c5d0098cbcf08 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 12 Sep 2017 21:42:09 +0800 Subject: [PATCH 16/50] drm/i915/gvt: Factor out vGPU workload creation/destroy Factor out vGPU workload creation/destroy functions since they are not specific to execlist emulation. 
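For illustration only (not kernel code), a minimal user-space sketch of the pairing being factored out here: one helper owns allocation and bookkeeping initialisation, one helper owns teardown, and callers never open-code either. struct workload and the helper names below are placeholders for intel_vgpu_create_workload()/intel_vgpu_destroy_workload().

  #include <stdio.h>
  #include <stdlib.h>

  struct workload {
      int ring_id;
      int status;          /* stand-in for -EINPROGRESS bookkeeping */
  };

  static struct workload *workload_create(int ring_id)
  {
      struct workload *w = calloc(1, sizeof(*w));

      if (!w)
          return NULL;
      w->ring_id = ring_id;
      w->status = -1;      /* "in progress" until completed */
      return w;
  }

  static void workload_destroy(struct workload *w)
  {
      free(w);
  }

  int main(void)
  {
      struct workload *w = workload_create(0);

      if (!w)
          return 1;
      printf("workload on ring %d created\n", w->ring_id);
      workload_destroy(w);
      return 0;
  }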
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 27 ++++----------- drivers/gpu/drm/i915/gvt/scheduler.c | 51 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/scheduler.h | 5 +++ 3 files changed, 62 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 1347f61fd7091..a3bdb037a9feb 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -358,12 +358,6 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, return 0; } -static void free_workload(struct intel_vgpu_workload *workload) -{ - intel_gvt_mm_unreference(workload->shadow_mm); - kmem_cache_free(workload->vgpu->submission.workloads, workload); -} - #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) @@ -586,7 +580,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc); out: intel_vgpu_unpin_mm(workload->shadow_mm); - free_workload(workload); + intel_vgpu_destroy_workload(workload); return ret; } @@ -687,10 +681,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, gvt_dbg_el("ring id %d begin a new workload\n", ring_id); - workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); - if (!workload) - return -ENOMEM; - /* record some ring buffer register values for scan and shadow */ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_start.val), &start, 4); @@ -699,13 +689,10 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); - INIT_LIST_HEAD(&workload->list); - INIT_LIST_HEAD(&workload->shadow_bb); - - init_waitqueue_head(&workload->shadow_ctx_status_wq); - atomic_set(&workload->shadow_ctx_active, 0); + workload = intel_vgpu_create_workload(vgpu); + if (IS_ERR(workload)) + return PTR_ERR(workload); - workload->vgpu = vgpu; workload->ring_id = ring_id; workload->ctx_desc = *desc; workload->ring_context_gpa = ring_context_gpa; @@ -715,9 +702,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, workload->rb_ctl = ctl; workload->prepare = prepare_execlist_workload; workload->complete = complete_execlist_workload; - workload->status = -EINPROGRESS; workload->emulate_schedule_in = emulate_schedule_in; - workload->shadowed = false; if (ring_id == RCS) { intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + @@ -764,7 +749,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, if (ret == 0) queue_workload(workload); else { - free_workload(workload); + intel_vgpu_destroy_workload(workload); if (vgpu_is_vm_unhealthy(ret)) { intel_vgpu_clean_execlist(vgpu); enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); @@ -853,7 +838,7 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); - free_workload(pos); + intel_vgpu_destroy_workload(pos); } clear_bit(engine->id, s->shadow_ctx_desc_updated); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 02af140233836..10ccb05d0e8d5 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -781,3 +781,54 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) i915_gem_context_put(s->shadow_ctx); return ret; } + +/** + * 
intel_vgpu_destroy_workload - destroy a vGPU workload + * @vgpu: a vGPU + * + * This function is called when destroy a vGPU workload. + * + */ +void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu_submission *s = &workload->vgpu->submission; + + if (workload->shadow_mm) + intel_gvt_mm_unreference(workload->shadow_mm); + + kmem_cache_free(s->workloads, workload); +} + +/** + * intel_vgpu_create_workload - create a vGPU workload + * @vgpu: a vGPU + * + * This function is called when creating a vGPU workload. + * + * Returns: + * struct intel_vgpu_workload * on success, negative error code in + * pointer if failed. + * + */ +struct intel_vgpu_workload * +intel_vgpu_create_workload(struct intel_vgpu *vgpu) +{ + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_vgpu_workload *workload; + + workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); + if (!workload) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&workload->list); + INIT_LIST_HEAD(&workload->shadow_bb); + + init_waitqueue_head(&workload->shadow_ctx_status_wq); + atomic_set(&workload->shadow_ctx_active, 0); + + workload->status = -EINPROGRESS; + workload->shadowed = false; + workload->vgpu = vgpu; + + return workload; +} diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3ca0087f10b20..3fe4702cda06b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -141,5 +141,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); +struct intel_vgpu_workload * +intel_vgpu_create_workload(struct intel_vgpu *vgpu); + +void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); + void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); #endif From 497aa3f5e3bdb6bea5994f7075e2f2df2377d70e Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 12 Sep 2017 21:51:10 +0800 Subject: [PATCH 17/50] drm/i915/gvt: Factor out prepare_workload() Factor out prepare_workload() for the following re-factor. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 10ccb05d0e8d5..3d1435f55c7bf 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -325,6 +325,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return ret; } +static int prepare_workload(struct intel_vgpu_workload *workload) +{ + int ret = 0; + + if (workload->prepare) + ret = workload->prepare(workload); + + return ret; +} + static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -344,12 +354,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (ret) goto out; - if (workload->prepare) { - ret = workload->prepare(workload); - if (ret) { - engine->context_unpin(engine, shadow_ctx); - goto out; - } + ret = prepare_workload(workload); + if (ret) { + engine->context_unpin(engine, shadow_ctx); + goto out; } out: From d8235b5e55845de19983cec38af245cc200b81e2 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 12 Sep 2017 22:06:39 +0800 Subject: [PATCH 18/50] drm/i915/gvt: Move common workload preparation into prepare_workload() Move common workload preparation into prepare_workload() in scheduler.c, as they are not specific to execlist emulation. 
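As a sketch only (not the kernel code itself), the new prepare_workload() follows the usual staged-setup pattern: every step that succeeds gains a matching release on the error path, unwound in reverse order via gotos. The step_*() and undo_*() helpers below are stand-ins for the pin_mm, shadow batch buffer and wa_ctx preparation steps.

  #include <stdio.h>

  static int step_pin(void)        { return 0; }
  static int step_shadow_bb(void)  { return 0; }
  static int step_wa_ctx(void)     { return -1; }  /* pretend this fails */

  static void undo_pin(void)       { puts("unpin mm"); }
  static void undo_shadow_bb(void) { puts("release shadow batch buffer"); }

  static int prepare(void)
  {
      int ret;

      ret = step_pin();
      if (ret)
          return ret;

      ret = step_shadow_bb();
      if (ret)
          goto err_unpin;

      ret = step_wa_ctx();
      if (ret)
          goto err_shadow_bb;

      return 0;

  err_shadow_bb:
      undo_shadow_bb();
  err_unpin:
      undo_pin();
      return ret;
  }

  int main(void)
  {
      printf("prepare() = %d\n", prepare());
      return 0;
  }

Keeping all of these steps in one function makes the unwind order obvious and lets the execlist-specific prepare callback run only after the common preparation has fully succeeded.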
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 154 +-------------------------- drivers/gpu/drm/i915/gvt/scheduler.c | 152 +++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 151 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index a3bdb037a9feb..45cb342500b79 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -361,110 +361,6 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) -static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) -{ - const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; - struct intel_shadow_bb_entry *entry_obj; - - /* pin the gem object to ggtt */ - list_for_each_entry(entry_obj, &workload->shadow_bb, list) { - struct i915_vma *vma; - - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); - if (IS_ERR(vma)) { - return PTR_ERR(vma); - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. - */ - - /* update the relocate gma with shadow batch buffer*/ - entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); - if (gmadr_bytes == 8) - entry_obj->bb_start_cmd_va[2] = 0; - } - return 0; -} - -static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) -{ - struct intel_vgpu_workload *workload = container_of(wa_ctx, - struct intel_vgpu_workload, - wa_ctx); - int ring_id = workload->ring_id; - struct intel_vgpu_submission *s = &workload->vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - struct drm_i915_gem_object *ctx_obj = - shadow_ctx->engine[ring_id].state->obj; - struct execlist_ring_context *shadow_ring_context; - struct page *page; - - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap_atomic(page); - - shadow_ring_context->bb_per_ctx_ptr.val = - (shadow_ring_context->bb_per_ctx_ptr.val & - (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma; - shadow_ring_context->rcs_indirect_ctx.val = - (shadow_ring_context->rcs_indirect_ctx.val & - (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; - - kunmap_atomic(shadow_ring_context); - return 0; -} - -static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) -{ - struct i915_vma *vma; - unsigned char *per_ctx_va = - (unsigned char *)wa_ctx->indirect_ctx.shadow_va + - wa_ctx->indirect_ctx.size; - - if (wa_ctx->indirect_ctx.size == 0) - return 0; - - vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, - 0, CACHELINE_BYTES, 0); - if (IS_ERR(vma)) { - return PTR_ERR(vma); - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. 
- */ - - wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma); - - wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1); - memset(per_ctx_va, 0, CACHELINE_BYTES); - - update_wa_ctx_2_shadow_ctx(wa_ctx); - return 0; -} - -static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) -{ - /* release all the shadow batch buffer */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; - - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - i915_gem_object_unpin_map(entry_obj->obj); - i915_gem_object_put(entry_obj->obj); - list_del(&entry_obj->list); - kfree(entry_obj); - } - } -} - static int prepare_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -473,36 +369,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) int ring_id = workload->ring_id; int ret; - ret = intel_vgpu_pin_mm(workload->shadow_mm); - if (ret) { - gvt_vgpu_err("fail to vgpu pin mm\n"); - goto out; - } - - ret = intel_vgpu_sync_oos_pages(workload->vgpu); - if (ret) { - gvt_vgpu_err("fail to vgpu sync oos pages\n"); - goto err_unpin_mm; - } - - ret = intel_vgpu_flush_post_shadow(workload->vgpu); - if (ret) { - gvt_vgpu_err("fail to flush post shadow\n"); - goto err_unpin_mm; - } - - ret = prepare_shadow_batch_buffer(workload); - if (ret) { - gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); - goto err_unpin_mm; - } - - ret = prepare_shadow_wa_ctx(&workload->wa_ctx); - if (ret) { - gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); - goto err_shadow_batch; - } - if (!workload->emulate_schedule_in) return 0; @@ -510,18 +376,11 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx); - if (!ret) - goto out; - else + if (ret) { gvt_vgpu_err("fail to emulate execlist schedule in\n"); - - release_shadow_wa_ctx(&workload->wa_ctx); -err_shadow_batch: - release_shadow_batch_buffer(workload); -err_unpin_mm: - intel_vgpu_unpin_mm(workload->shadow_mm); -out: - return ret; + return ret; + } + return 0; } static int complete_execlist_workload(struct intel_vgpu_workload *workload) @@ -538,11 +397,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - if (!workload->status) { - release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); - } - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { /* if workload->status is not successful means HW GPU * has occurred GPU hang or something wrong with i915/GVT, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 3d1435f55c7bf..a7a67cc65a071 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -325,13 +325,157 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return ret; } +static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) +{ + struct intel_gvt *gvt = workload->vgpu->gvt; + const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; + struct intel_shadow_bb_entry *entry_obj; + + /* pin the gem object to ggtt */ + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { + struct 
i915_vma *vma; + + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. + */ + + /* update the relocate gma with shadow batch buffer*/ + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); + if (gmadr_bytes == 8) + entry_obj->bb_start_cmd_va[2] = 0; + } + return 0; +} + +static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) +{ + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + int ring_id = workload->ring_id; + struct intel_vgpu_submission *s = &workload->vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + struct drm_i915_gem_object *ctx_obj = + shadow_ctx->engine[ring_id].state->obj; + struct execlist_ring_context *shadow_ring_context; + struct page *page; + + page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); + shadow_ring_context = kmap_atomic(page); + + shadow_ring_context->bb_per_ctx_ptr.val = + (shadow_ring_context->bb_per_ctx_ptr.val & + (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma; + shadow_ring_context->rcs_indirect_ctx.val = + (shadow_ring_context->rcs_indirect_ctx.val & + (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; + + kunmap_atomic(shadow_ring_context); + return 0; +} + +static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +{ + struct i915_vma *vma; + unsigned char *per_ctx_va = + (unsigned char *)wa_ctx->indirect_ctx.shadow_va + + wa_ctx->indirect_ctx.size; + + if (wa_ctx->indirect_ctx.size == 0) + return 0; + + vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, + 0, CACHELINE_BYTES, 0); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. 
+ */ + + wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma); + + wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1); + memset(per_ctx_va, 0, CACHELINE_BYTES); + + update_wa_ctx_2_shadow_ctx(wa_ctx); + return 0; +} + +static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) +{ + /* release all the shadow batch buffer */ + if (!list_empty(&workload->shadow_bb)) { + struct intel_shadow_bb_entry *entry_obj = + list_first_entry(&workload->shadow_bb, + struct intel_shadow_bb_entry, + list); + struct intel_shadow_bb_entry *temp; + + list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, + list) { + i915_gem_object_unpin_map(entry_obj->obj); + i915_gem_object_put(entry_obj->obj); + list_del(&entry_obj->list); + kfree(entry_obj); + } + } +} + static int prepare_workload(struct intel_vgpu_workload *workload) { + struct intel_vgpu *vgpu = workload->vgpu; int ret = 0; - if (workload->prepare) + ret = intel_vgpu_pin_mm(workload->shadow_mm); + if (ret) { + gvt_vgpu_err("fail to vgpu pin mm\n"); + return ret; + } + + ret = intel_vgpu_sync_oos_pages(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to vgpu sync oos pages\n"); + goto err_unpin_mm; + } + + ret = intel_vgpu_flush_post_shadow(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to flush post shadow\n"); + goto err_unpin_mm; + } + + ret = prepare_shadow_batch_buffer(workload); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); + goto err_unpin_mm; + } + + ret = prepare_shadow_wa_ctx(&workload->wa_ctx); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); + goto err_shadow_batch; + } + + if (workload->prepare) { ret = workload->prepare(workload); + if (ret) + goto err_shadow_wa_ctx; + } + return 0; +err_shadow_wa_ctx: + release_shadow_wa_ctx(&workload->wa_ctx); +err_shadow_batch: + release_shadow_batch_buffer(workload); +err_unpin_mm: + intel_vgpu_unpin_mm(workload->shadow_mm); return ret; } @@ -557,6 +701,12 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; list_del_init(&workload->list); + + if (!workload->status) { + release_shadow_batch_buffer(workload); + release_shadow_wa_ctx(&workload->wa_ctx); + } + workload->complete(workload); atomic_dec(&s->running_workload_num); From 6d76303553bab75ffc53993c56aad06251d8de60 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 12 Sep 2017 22:33:12 +0800 Subject: [PATCH 19/50] drm/i915/gvt: Move common vGPU workload creation into scheduler.c Move common vGPU workload creation functions into scheduler.c since they are not specific to execlist emulation. 
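Outside the patch, a small sketch of one detail that moves along with the creation path: when the incoming context descriptor matches the last queued workload on that ring, the guest ring head may not be updated yet, so the previous workload's tail is reused as the new head. The types and pick_head() below are illustrative only.

  #include <stdbool.h>
  #include <stdio.h>

  struct ctx_desc { unsigned int context_id, lrca; };
  struct wl { struct ctx_desc desc; unsigned long rb_head, rb_tail; };

  static bool same_context(const struct ctx_desc *a, const struct ctx_desc *b)
  {
      return a->context_id == b->context_id && a->lrca == b->lrca;
  }

  static unsigned long pick_head(const struct wl *last,
                                 const struct ctx_desc *d,
                                 unsigned long guest_head)
  {
      if (last && same_context(&last->desc, d))
          return last->rb_tail;  /* guest head may be stale */
      return guest_head;
  }

  int main(void)
  {
      struct wl last = { { 1, 0x10 }, 0x100, 0x180 };
      struct ctx_desc d = { 1, 0x10 };

      printf("head = 0x%lx\n", pick_head(&last, &d, 0x100));
      return 0;
  }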
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 156 +-------------------- drivers/gpu/drm/i915/gvt/scheduler.c | 196 ++++++++++++++++++++++++--- drivers/gpu/drm/i915/gvt/scheduler.h | 3 +- 3 files changed, 183 insertions(+), 172 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 45cb342500b79..c753cb5bfb589 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -438,179 +438,29 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) return ret; } -#define RING_CTX_OFF(x) \ - offsetof(struct execlist_ring_context, x) - -static void read_guest_pdps(struct intel_vgpu *vgpu, - u64 ring_context_gpa, u32 pdp[8]) -{ - u64 gpa; - int i; - - gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val); - - for (i = 0; i < 8; i++) - intel_gvt_hypervisor_read_gpa(vgpu, - gpa + i * 8, &pdp[7 - i], 4); -} - -static int prepare_mm(struct intel_vgpu_workload *workload) -{ - struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; - struct intel_vgpu_mm *mm; - struct intel_vgpu *vgpu = workload->vgpu; - int page_table_level; - u32 pdp[8]; - - if (desc->addressing_mode == 1) { /* legacy 32-bit */ - page_table_level = 3; - } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ - page_table_level = 4; - } else { - gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); - return -EINVAL; - } - - read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp); - - mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp); - if (mm) { - intel_gvt_mm_reference(mm); - } else { - - mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, - pdp, page_table_level, 0); - if (IS_ERR(mm)) { - gvt_vgpu_err("fail to create mm object.\n"); - return PTR_ERR(mm); - } - } - workload->shadow_mm = mm; - return 0; -} - -#define get_last_workload(q) \ - (list_empty(q) ? 
NULL : container_of(q->prev, \ - struct intel_vgpu_workload, list)) - static int submit_context(struct intel_vgpu *vgpu, int ring_id, struct execlist_ctx_descriptor_format *desc, bool emulate_schedule_in) { struct intel_vgpu_submission *s = &vgpu->submission; - struct list_head *q = workload_q_head(vgpu, ring_id); - struct intel_vgpu_workload *last_workload = get_last_workload(q); struct intel_vgpu_workload *workload = NULL; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - u64 ring_context_gpa; - u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; - int ret; - - ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); - if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); - return -EINVAL; - } - - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(ring_header.val), &head, 4); - - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(ring_tail.val), &tail, 4); - - head &= RB_HEAD_OFF_MASK; - tail &= RB_TAIL_OFF_MASK; - if (last_workload && same_context(&last_workload->ctx_desc, desc)) { - gvt_dbg_el("ring id %d cur workload == last\n", ring_id); - gvt_dbg_el("ctx head %x real head %lx\n", head, - last_workload->rb_tail); - /* - * cannot use guest context head pointer here, - * as it might not be updated at this time - */ - head = last_workload->rb_tail; - } - - gvt_dbg_el("ring id %d begin a new workload\n", ring_id); - - /* record some ring buffer register values for scan and shadow */ - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(rb_start.val), &start, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(rb_ctrl.val), &ctl, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); - - workload = intel_vgpu_create_workload(vgpu); + workload = intel_vgpu_create_workload(vgpu, ring_id, desc); if (IS_ERR(workload)) return PTR_ERR(workload); - workload->ring_id = ring_id; - workload->ctx_desc = *desc; - workload->ring_context_gpa = ring_context_gpa; - workload->rb_head = head; - workload->rb_tail = tail; - workload->rb_start = start; - workload->rb_ctl = ctl; workload->prepare = prepare_execlist_workload; workload->complete = complete_execlist_workload; workload->emulate_schedule_in = emulate_schedule_in; - if (ring_id == RCS) { - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + - RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); - - workload->wa_ctx.indirect_ctx.guest_gma = - indirect_ctx & INDIRECT_CTX_ADDR_MASK; - workload->wa_ctx.indirect_ctx.size = - (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * - CACHELINE_BYTES; - workload->wa_ctx.per_ctx.guest_gma = - per_ctx & PER_CTX_ADDR_MASK; - workload->wa_ctx.per_ctx.valid = per_ctx & 1; - } - if (emulate_schedule_in) workload->elsp_dwords = s->execlist[ring_id].elsp_dwords; - gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", - workload, ring_id, head, tail, start, ctl); - gvt_dbg_el("workload %p emulate schedule_in %d\n", workload, emulate_schedule_in); - ret = prepare_mm(workload); - if (ret) { - kmem_cache_free(s->workloads, workload); - return ret; - } - - /* Only scan and shadow the first workload in the queue - * as there is only one pre-allocated buf-obj for shadow. 
- */ - if (list_empty(workload_q_head(vgpu, ring_id))) { - intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->drm.struct_mutex); - ret = intel_gvt_scan_and_shadow_workload(workload); - mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); - } - - if (ret == 0) - queue_workload(workload); - else { - intel_vgpu_destroy_workload(workload); - if (vgpu_is_vm_unhealthy(ret)) { - intel_vgpu_clean_execlist(vgpu); - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); - } - } - return ret; - + queue_workload(workload); + return 0; } int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a7a67cc65a071..69893f29ff6dc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -792,13 +792,8 @@ static int workload_thread(void *priv) FORCEWAKE_ALL); intel_runtime_pm_put(gvt->dev_priv); - if (ret && (vgpu_is_vm_unhealthy(ret))) { - mutex_lock(&gvt->lock); - intel_vgpu_clean_execlist(vgpu); - mutex_unlock(&gvt->lock); + if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); - } - } return 0; } @@ -957,9 +952,90 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) kmem_cache_free(s->workloads, workload); } +static struct intel_vgpu_workload * +alloc_workload(struct intel_vgpu *vgpu) +{ + struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_vgpu_workload *workload; + + workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); + if (!workload) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&workload->list); + INIT_LIST_HEAD(&workload->shadow_bb); + + init_waitqueue_head(&workload->shadow_ctx_status_wq); + atomic_set(&workload->shadow_ctx_active, 0); + + workload->status = -EINPROGRESS; + workload->shadowed = false; + workload->vgpu = vgpu; + + return workload; +} + +#define RING_CTX_OFF(x) \ + offsetof(struct execlist_ring_context, x) + +static void read_guest_pdps(struct intel_vgpu *vgpu, + u64 ring_context_gpa, u32 pdp[8]) +{ + u64 gpa; + int i; + + gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val); + + for (i = 0; i < 8; i++) + intel_gvt_hypervisor_read_gpa(vgpu, + gpa + i * 8, &pdp[7 - i], 4); +} + +static int prepare_mm(struct intel_vgpu_workload *workload) +{ + struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; + struct intel_vgpu_mm *mm; + struct intel_vgpu *vgpu = workload->vgpu; + int page_table_level; + u32 pdp[8]; + + if (desc->addressing_mode == 1) { /* legacy 32-bit */ + page_table_level = 3; + } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ + page_table_level = 4; + } else { + gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); + return -EINVAL; + } + + read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp); + + mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp); + if (mm) { + intel_gvt_mm_reference(mm); + } else { + + mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, + pdp, page_table_level, 0); + if (IS_ERR(mm)) { + gvt_vgpu_err("fail to create mm object.\n"); + return PTR_ERR(mm); + } + } + workload->shadow_mm = mm; + return 0; +} + +#define same_context(a, b) (((a)->context_id == (b)->context_id) && \ + ((a)->lrca == (b)->lrca)) + +#define get_last_workload(q) \ + (list_empty(q) ? 
NULL : container_of(q->prev, \ + struct intel_vgpu_workload, list)) /** * intel_vgpu_create_workload - create a vGPU workload * @vgpu: a vGPU + * @desc: a guest context descriptor * * This function is called when creating a vGPU workload. * @@ -969,24 +1045,108 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) * */ struct intel_vgpu_workload * -intel_vgpu_create_workload(struct intel_vgpu *vgpu) +intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, + struct execlist_ctx_descriptor_format *desc) { struct intel_vgpu_submission *s = &vgpu->submission; - struct intel_vgpu_workload *workload; + struct list_head *q = workload_q_head(vgpu, ring_id); + struct intel_vgpu_workload *last_workload = get_last_workload(q); + struct intel_vgpu_workload *workload = NULL; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + u64 ring_context_gpa; + u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; + int ret; - workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); - if (!workload) - return ERR_PTR(-ENOMEM); + ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); + if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); + return ERR_PTR(-EINVAL); + } - INIT_LIST_HEAD(&workload->list); - INIT_LIST_HEAD(&workload->shadow_bb); + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(ring_header.val), &head, 4); - init_waitqueue_head(&workload->shadow_ctx_status_wq); - atomic_set(&workload->shadow_ctx_active, 0); + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(ring_tail.val), &tail, 4); - workload->status = -EINPROGRESS; - workload->shadowed = false; - workload->vgpu = vgpu; + head &= RB_HEAD_OFF_MASK; + tail &= RB_TAIL_OFF_MASK; + + if (last_workload && same_context(&last_workload->ctx_desc, desc)) { + gvt_dbg_el("ring id %d cur workload == last\n", ring_id); + gvt_dbg_el("ctx head %x real head %lx\n", head, + last_workload->rb_tail); + /* + * cannot use guest context head pointer here, + * as it might not be updated at this time + */ + head = last_workload->rb_tail; + } + + gvt_dbg_el("ring id %d begin a new workload\n", ring_id); + + /* record some ring buffer register values for scan and shadow */ + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(rb_start.val), &start, 4); + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(rb_ctrl.val), &ctl, 4); + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); + + workload = alloc_workload(vgpu); + if (IS_ERR(workload)) + return workload; + + workload->ring_id = ring_id; + workload->ctx_desc = *desc; + workload->ring_context_gpa = ring_context_gpa; + workload->rb_head = head; + workload->rb_tail = tail; + workload->rb_start = start; + workload->rb_ctl = ctl; + + if (ring_id == RCS) { + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); + intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); + + workload->wa_ctx.indirect_ctx.guest_gma = + indirect_ctx & INDIRECT_CTX_ADDR_MASK; + workload->wa_ctx.indirect_ctx.size = + (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * + CACHELINE_BYTES; + workload->wa_ctx.per_ctx.guest_gma = + per_ctx & PER_CTX_ADDR_MASK; + workload->wa_ctx.per_ctx.valid = per_ctx & 1; + } + + gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", + 
workload, ring_id, head, tail, start, ctl); + + ret = prepare_mm(workload); + if (ret) { + kmem_cache_free(s->workloads, workload); + return ERR_PTR(ret); + } + + /* Only scan and shadow the first workload in the queue + * as there is only one pre-allocated buf-obj for shadow. + */ + if (list_empty(workload_q_head(vgpu, ring_id))) { + intel_runtime_pm_get(dev_priv); + mutex_lock(&dev_priv->drm.struct_mutex); + ret = intel_gvt_scan_and_shadow_workload(workload); + mutex_unlock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_put(dev_priv); + } + + if (ret && (vgpu_is_vm_unhealthy(ret))) { + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + intel_vgpu_destroy_workload(workload); + return ERR_PTR(ret); + } return workload; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3fe4702cda06b..79199693697bd 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -142,7 +142,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); struct intel_vgpu_workload * -intel_vgpu_create_workload(struct intel_vgpu *vgpu); +intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, + struct execlist_ctx_descriptor_format *desc); void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); From d0d51282b88e11a1aa71040b21a9a8cae584a1d4 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 12 Sep 2017 22:39:08 +0800 Subject: [PATCH 20/50] drm/i915/gvt: Remove one extra declaration in scheduler.h Now the function has been moved into scheduler.c. The extra declaration is not necessary. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/scheduler.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 79199693697bd..2dc729320a61b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -147,5 +147,4 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); -void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); #endif From ad1d36369b07f6b9db81897802ee5d8764eaa922 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 13 Sep 2017 00:31:29 +0800 Subject: [PATCH 21/50] drm/i915/gvt: Introduce vGPU submission ops Introduce vGPU submission ops to support easy switching submission mode of one vGPU between different OSes. 
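A stand-alone sketch, not the kernel code, of the ops-table switch being introduced: each virtual submission interface supplies init/clean callbacks, and a select helper cleans up the currently active interface before installing another, with index 0 meaning "no interface selected". execlist_ops and select_ops() below are illustrative stand-ins for intel_vgpu_execlist_submission_ops and intel_vgpu_select_submission_ops().

  #include <stdio.h>

  struct submission_ops {
      const char *name;
      int  (*init)(void);
      void (*clean)(void);
  };

  static int  execlist_init(void)  { puts("init execlist"); return 0; }
  static void execlist_clean(void) { puts("clean execlist"); }

  static const struct submission_ops execlist_ops = {
      .name = "execlist", .init = execlist_init, .clean = execlist_clean,
  };

  /* index 0 means "no submission interface" */
  static const struct submission_ops *ops_table[] = { NULL, &execlist_ops };
  static const struct submission_ops *active;

  static int select_ops(unsigned int i)
  {
      if (i >= sizeof(ops_table) / sizeof(ops_table[0]))
          return -1;
      if (active) {
          active->clean();
          active = NULL;
      }
      if (i == 0)
          return 0;
      if (ops_table[i]->init())
          return -1;
      active = ops_table[i];
      printf("activated [ %s ]\n", active->name);
      return 0;
  }

  int main(void)
  {
      select_ops(1);  /* guest enables execlist mode */
      select_ops(0);  /* reset: deselect the interface */
      return 0;
  }

The indirection is what later allows a GuC-based interface to be added without touching the ring-mode MMIO handler or the reset path.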
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 10 ++++-- drivers/gpu/drm/i915/gvt/gvt.h | 14 ++++++++ drivers/gpu/drm/i915/gvt/handlers.c | 16 +++++++-- drivers/gpu/drm/i915/gvt/scheduler.c | 53 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/scheduler.h | 6 ++++ drivers/gpu/drm/i915/gvt/vgpu.c | 13 ++----- 6 files changed, 98 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index c753cb5bfb589..e633ac4991e89 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -548,7 +548,7 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) } } -void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) +void clean_execlist(struct intel_vgpu *vgpu) { enum intel_engine_id i; struct intel_engine_cs *engine; @@ -564,7 +564,7 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) } } -int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) +int init_execlist(struct intel_vgpu *vgpu) { enum intel_engine_id i; struct intel_engine_cs *engine; @@ -586,3 +586,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, for_each_engine_masked(engine, dev_priv, engine_mask, tmp) init_vgpu_execlist(vgpu, engine->id); } + +const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = { + .name = "execlist", + .init = init_execlist, + .clean = clean_execlist, +}; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index be1bb6dbcbd27..1be41505c4ba2 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -142,6 +142,17 @@ struct vgpu_sched_ctl { int weight; }; +enum { + INTEL_VGPU_EXECLIST_SUBMISSION = 1, + INTEL_VGPU_GUC_SUBMISSION, +}; + +struct intel_vgpu_submission_ops { + const char *name; + int (*init)(struct intel_vgpu *vgpu); + void (*clean)(struct intel_vgpu *vgpu); +}; + struct intel_vgpu_submission { struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; struct list_head workload_q_head[I915_NUM_ENGINES]; @@ -152,6 +163,9 @@ struct intel_vgpu_submission { DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); void *ring_scan_buffer[I915_NUM_ENGINES]; int ring_scan_buffer_size[I915_NUM_ENGINES]; + const struct intel_vgpu_submission_ops *ops; + int virtual_submission_interface; + bool active; }; struct intel_vgpu { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index acc1cf4fa6f53..5438dafea7a49 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1471,9 +1471,11 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct intel_vgpu_submission *s = &vgpu->submission; u32 data = *(u32 *)p_data; int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); bool enable_execlist; + int ret; write_vreg(vgpu, offset, p_data, bytes); @@ -1495,8 +1497,18 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, (enable_execlist ? 
"enabling" : "disabling"), ring_id); - if (enable_execlist) - intel_vgpu_start_schedule(vgpu); + if (!enable_execlist) + return 0; + + if (s->active) + return 0; + + ret = intel_vgpu_select_submission_ops(vgpu, + INTEL_VGPU_EXECLIST_SUBMISSION); + if (ret) + return ret; + + intel_vgpu_start_schedule(vgpu); } return 0; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 69893f29ff6dc..f3be88fa88dda 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -884,6 +884,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) { struct intel_vgpu_submission *s = &vgpu->submission; + intel_vgpu_select_submission_ops(vgpu, 0); i915_gem_context_put(s->shadow_ctx); kmem_cache_destroy(s->workloads); } @@ -935,6 +936,58 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) return ret; } +/** + * intel_vgpu_select_submission_ops - select virtual submission interface + * @vgpu: a vGPU + * @interface: expected vGPU virtual submission interface + * + * This function is called when guest configures submission interface. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, + unsigned int interface) +{ + struct intel_vgpu_submission *s = &vgpu->submission; + const struct intel_vgpu_submission_ops *ops[] = { + [INTEL_VGPU_EXECLIST_SUBMISSION] = + &intel_vgpu_execlist_submission_ops, + }; + int ret; + + if (WARN_ON(interface >= ARRAY_SIZE(ops))) + return -EINVAL; + + if (s->active) { + s->ops->clean(vgpu); + s->active = false; + gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n", + vgpu->id, s->ops->name); + } + + if (interface == 0) { + s->ops = NULL; + s->virtual_submission_interface = 0; + gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id); + return 0; + } + + ret = ops[interface]->init(vgpu); + if (ret) + return ret; + + s->ops = ops[interface]; + s->virtual_submission_interface = interface; + s->active = true; + + gvt_dbg_core("vgpu%d: activate ops [ %s ]\n", + vgpu->id, s->ops->name); + + return 0; +} + /** * intel_vgpu_destroy_workload - destroy a vGPU workload * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 2dc729320a61b..8652acda6436c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -141,6 +141,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); +int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, + unsigned int interface); + +extern const struct intel_vgpu_submission_ops +intel_vgpu_execlist_submission_ops; + struct intel_vgpu_workload * intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, struct execlist_ctx_descriptor_format *desc); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 1c9818d9f1d85..e63abd48bceef 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -255,7 +255,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); - intel_vgpu_clean_execlist(vgpu); intel_vgpu_clean_display(vgpu); intel_vgpu_clean_opregion(vgpu); intel_vgpu_clean_gtt(vgpu); @@ -371,26 +370,20 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_gtt; - ret = intel_vgpu_init_execlist(vgpu); - if (ret) - goto 
out_clean_display; - ret = intel_vgpu_setup_submission(vgpu); if (ret) - goto out_clean_execlist; + goto out_clean_display; ret = intel_vgpu_init_sched_policy(vgpu); if (ret) - goto out_clean_shadow_ctx; + goto out_clean_submission; mutex_unlock(&gvt->lock); return vgpu; -out_clean_shadow_ctx: +out_clean_submission: intel_vgpu_clean_submission(vgpu); -out_clean_execlist: - intel_vgpu_clean_execlist(vgpu); out_clean_display: intel_vgpu_clean_display(vgpu); out_clean_gtt: From 06bb372f9ace47296aeaaca8e130d948ea2855cf Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 13 Sep 2017 01:41:35 +0800 Subject: [PATCH 22/50] drm/i915/gvt: Introduce intel_vgpu_reset_submission Introduce an generic API to reset vGPU virtual submission interface. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 20 ++++++++------------ drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/scheduler.c | 20 ++++++++++++++++++++ drivers/gpu/drm/i915/gvt/scheduler.h | 3 +++ drivers/gpu/drm/i915/gvt/vgpu.c | 4 ++-- 5 files changed, 34 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index e633ac4991e89..107eeda82e9dd 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -564,18 +564,7 @@ void clean_execlist(struct intel_vgpu *vgpu) } } -int init_execlist(struct intel_vgpu *vgpu) -{ - enum intel_engine_id i; - struct intel_engine_cs *engine; - - for_each_engine(engine, vgpu->gvt->dev_priv, i) - init_vgpu_execlist(vgpu, i); - - return 0; -} - -void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, +void reset_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; @@ -587,8 +576,15 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, init_vgpu_execlist(vgpu, engine->id); } +int init_execlist(struct intel_vgpu *vgpu) +{ + reset_execlist(vgpu, ALL_ENGINES); + return 0; +} + const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = { .name = "execlist", .init = init_execlist, + .reset = reset_execlist, .clean = clean_execlist, }; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 1be41505c4ba2..b7253d7daf725 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -151,6 +151,7 @@ struct intel_vgpu_submission_ops { const char *name; int (*init)(struct intel_vgpu *vgpu); void (*clean)(struct intel_vgpu *vgpu); + void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask); }; struct intel_vgpu_submission { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f3be88fa88dda..88ce57116a4c1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -889,6 +889,26 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) kmem_cache_destroy(s->workloads); } + +/** + * intel_vgpu_reset_submission - reset submission-related resource for vGPU + * @vgpu: a vGPU + * @engine_mask: engines expected to be reset + * + * This function is called when a vGPU is being destroyed. 
+ * + */ +void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, + unsigned long engine_mask) +{ + struct intel_vgpu_submission *s = &vgpu->submission; + + if (!s->active) + return; + + s->ops->reset(vgpu, engine_mask); +} + /** * intel_vgpu_setup_submission - setup submission-related resource for vGPU * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 8652acda6436c..e0b5730a30187 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -139,6 +139,9 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu); int intel_vgpu_setup_submission(struct intel_vgpu *vgpu); +void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, + unsigned long engine_mask); + void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index e63abd48bceef..841b95c6c2313 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -492,10 +492,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, mutex_lock(&gvt->lock); } - intel_vgpu_reset_execlist(vgpu, resetting_eng); - + intel_vgpu_reset_submission(vgpu, resetting_eng); /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { + intel_vgpu_select_submission_ops(vgpu, 0); /*fence will not be reset during virtual reset */ if (dmlr) { From e2c43c0111d54d4857d052fcfca9f3f16bf1b1b2 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 13 Sep 2017 01:58:35 +0800 Subject: [PATCH 23/50] drm/i915/gvt: Move clean_workloads() into scheduler.c Move clean_workloads() into scheduler.c since it's not specific to execlist. v2: - Remove clean_workloads in intel_vgpu_select_submission_ops. (Zhenyu) Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 41 +--------------------------- drivers/gpu/drm/i915/gvt/scheduler.c | 37 +++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 107eeda82e9dd..5c966edfeefb6 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -46,8 +46,6 @@ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) -static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); - static int context_switch_events[] = { [RCS] = RCS_AS_CONTEXT_SWITCH, [BCS] = BCS_AS_CONTEXT_SWITCH, @@ -397,23 +395,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { - /* if workload->status is not successful means HW GPU - * has occurred GPU hang or something wrong with i915/GVT, - * and GVT won't inject context switch interrupt to guest. - * So this error is a vGPU hang actually to the guest. - * According to this we should emunlate a vGPU hang. If - * there are pending workloads which are already submitted - * from guest, we should clean them up like HW GPU does. - * - * if it is in middle of engine resetting, the pending - * workloads won't be submitted to HW GPU and will be - * cleaned up during the resetting process later, so doing - * the workload clean up here doesn't have any impact. 
- **/ - clean_workloads(vgpu, ENGINE_MASK(ring_id)); + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) goto out; - } if (!list_empty(workload_q_head(vgpu, ring_id))) { struct execlist_ctx_descriptor_format *this_desc, *next_desc; @@ -529,32 +512,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } -static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) -{ - struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_engine_cs *engine; - struct intel_vgpu_workload *pos, *n; - unsigned int tmp; - - /* free the unsubmited workloads in the queues. */ - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - list_for_each_entry_safe(pos, n, - &s->workload_q_head[engine->id], list) { - list_del_init(&pos->list); - intel_vgpu_destroy_workload(pos); - } - clear_bit(engine->id, s->shadow_ctx_desc_updated); - } -} - void clean_execlist(struct intel_vgpu *vgpu) { enum intel_engine_id i; struct intel_engine_cs *engine; - clean_workloads(vgpu, ALL_ENGINES); - for_each_engine(engine, vgpu->gvt->dev_priv, i) { struct intel_vgpu_submission *s = &vgpu->submission; @@ -571,7 +533,6 @@ void reset_execlist(struct intel_vgpu *vgpu, struct intel_engine_cs *engine; unsigned int tmp; - clean_workloads(vgpu, engine_mask); for_each_engine_masked(engine, dev_priv, engine_mask, tmp) init_vgpu_execlist(vgpu, engine->id); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 88ce57116a4c1..391690c0c28c1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -644,6 +644,25 @@ static void update_guest_context(struct intel_vgpu_workload *workload) kunmap(page); } +static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) +{ + struct intel_vgpu_submission *s = &vgpu->submission; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_engine_cs *engine; + struct intel_vgpu_workload *pos, *n; + unsigned int tmp; + + /* free the unsubmited workloads in the queues. */ + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + list_for_each_entry_safe(pos, n, + &s->workload_q_head[engine->id], list) { + list_del_init(&pos->list); + intel_vgpu_destroy_workload(pos); + } + clear_bit(engine->id, s->shadow_ctx_desc_updated); + } +} + static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -707,6 +726,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) release_shadow_wa_ctx(&workload->wa_ctx); } + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + /* if workload->status is not successful means HW GPU + * has occurred GPU hang or something wrong with i915/GVT, + * and GVT won't inject context switch interrupt to guest. + * So this error is a vGPU hang actually to the guest. + * According to this we should emunlate a vGPU hang. If + * there are pending workloads which are already submitted + * from guest, we should clean them up like HW GPU does. + * + * if it is in middle of engine resetting, the pending + * workloads won't be submitted to HW GPU and will be + * cleaned up during the resetting process later, so doing + * the workload clean up here doesn't have any impact. 
+ **/ + clean_workloads(vgpu, ENGINE_MASK(ring_id)); + } + workload->complete(workload); atomic_dec(&s->running_workload_num); @@ -906,6 +942,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, if (!s->active) return; + clean_workloads(vgpu, engine_mask); s->ops->reset(vgpu, engine_mask); } From c5d71cb31723135dbaa54b32bf046342fe538210 Mon Sep 17 00:00:00 2001 From: fred gao Date: Thu, 28 Sep 2017 11:03:02 +0800 Subject: [PATCH 24/50] drm/i915/gvt: Move vGPU type related code into gvt file In this patch, all the vGPU type related code will be merged into same gvt file and the common interface will be exposed to both XenGT and KvmGT. v2: - remove the useless mdev_* gvt_ops. add get_gvt_attr ops for MPT module. intel_gvt_{init,cleanup}_vgpu_type_groups are initialized in gvt part. (Wang, Zhi) - set gvt_vgpu_type_groups[i] to NULL. (Zhang,Xiong) Signed-off-by: fred gao Reviewed-by: Zhi Wang Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 134 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/gvt.h | 4 + 2 files changed, 138 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index aaa347f8620cc..ed313811fa761 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -36,6 +36,8 @@ #include "i915_drv.h" #include "gvt.h" +#include +#include struct intel_gvt_host intel_gvt_host; @@ -44,6 +46,129 @@ static const char * const supported_hypervisors[] = { [INTEL_GVT_HYPERVISOR_KVM] = "KVM", }; +static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, + const char *name) +{ + int i; + struct intel_vgpu_type *t; + const char *driver_name = dev_driver_string( + &gvt->dev_priv->drm.pdev->dev); + + for (i = 0; i < gvt->num_types; i++) { + t = &gvt->types[i]; + if (!strncmp(t->name, name + strlen(driver_name) + 1, + sizeof(t->name))) + return t; + } + + return NULL; +} + +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) +{ + struct intel_vgpu_type *type; + unsigned int num = 0; + void *gvt = kdev_to_i915(dev)->gvt; + + type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + if (!type) + num = 0; + else + num = type->avail_instance; + + return sprintf(buf, "%u\n", num); +} + +static ssize_t device_api_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); +} + +static ssize_t description_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + struct intel_vgpu_type *type; + void *gvt = kdev_to_i915(dev)->gvt; + + type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + if (!type) + return 0; + + return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" + "fence: %d\nresolution: %s\n" + "weight: %d\n", + BYTES_TO_MB(type->low_gm_size), + BYTES_TO_MB(type->high_gm_size), + type->fence, vgpu_edid_str(type->resolution), + type->weight); +} + +static MDEV_TYPE_ATTR_RO(available_instances); +static MDEV_TYPE_ATTR_RO(device_api); +static MDEV_TYPE_ATTR_RO(description); + +static struct attribute *gvt_type_attrs[] = { + &mdev_type_attr_available_instances.attr, + &mdev_type_attr_device_api.attr, + &mdev_type_attr_description.attr, + NULL, +}; + +static struct attribute_group *gvt_vgpu_type_groups[] = { + [0 ... 
NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, +}; + +static bool intel_get_gvt_attrs(struct attribute ***type_attrs, + struct attribute_group ***intel_vgpu_type_groups) +{ + *type_attrs = gvt_type_attrs; + *intel_vgpu_type_groups = gvt_vgpu_type_groups; + return true; +} + +static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +{ + int i, j; + struct intel_vgpu_type *type; + struct attribute_group *group; + + for (i = 0; i < gvt->num_types; i++) { + type = &gvt->types[i]; + + group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); + if (WARN_ON(!group)) + goto unwind; + + group->name = type->name; + group->attrs = gvt_type_attrs; + gvt_vgpu_type_groups[i] = group; + } + + return true; + +unwind: + for (j = 0; j < i; j++) { + group = gvt_vgpu_type_groups[j]; + kfree(group); + } + + return false; +} + +static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +{ + int i; + struct attribute_group *group; + + for (i = 0; i < gvt->num_types; i++) { + group = gvt_vgpu_type_groups[i]; + gvt_vgpu_type_groups[i] = NULL; + kfree(group); + } +} + static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, @@ -54,6 +179,8 @@ static const struct intel_gvt_ops intel_gvt_ops = { .vgpu_reset = intel_gvt_reset_vgpu, .vgpu_activate = intel_gvt_activate_vgpu, .vgpu_deactivate = intel_gvt_deactivate_vgpu, + .gvt_find_vgpu_type = intel_gvt_find_vgpu_type, + .get_gvt_attrs = intel_get_gvt_attrs, }; /** @@ -202,6 +329,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_free_firmware(gvt); intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); + intel_gvt_cleanup_vgpu_type_groups(gvt); intel_gvt_clean_vgpu_types(gvt); idr_destroy(&gvt->vgpu_idr); @@ -292,6 +420,12 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) goto out_clean_thread; + ret = intel_gvt_init_vgpu_type_groups(gvt); + if (ret == false) { + gvt_err("failed to init vgpu type groups: %d\n", ret); + goto out_clean_types; + } + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, &intel_gvt_ops); if (ret) { diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b7253d7daf725..35c8d87151ddf 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -533,6 +533,10 @@ struct intel_gvt_ops { void (*vgpu_reset)(struct intel_vgpu *); void (*vgpu_activate)(struct intel_vgpu *); void (*vgpu_deactivate)(struct intel_vgpu *); + struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt, + const char *name); + bool (*get_gvt_attrs)(struct attribute ***type_attrs, + struct attribute_group ***intel_vgpu_type_groups); }; From 6aa23ced915f94e23cd7bc3c1d005e39ef39bb2b Mon Sep 17 00:00:00 2001 From: fred gao Date: Thu, 28 Sep 2017 11:03:03 +0800 Subject: [PATCH 25/50] drm/i915/gvt: Refactor vGPU type code in kvmgt part all the vGPU type related code in kvmgt will be moved into gvt.c/gvt.h files while the common vGPU type related interfaces will be called. v2: - intel_gvt_{init,cleanup}_vgpu_type_groups are initialized in gvt part. 
(Wang, Zhi) Signed-off-by: fred gao Reviewed-by: Zhi Wang Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 128 ++----------------------------- 1 file changed, 8 insertions(+), 120 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index fc6878bb24968..110f07e8bcfb9 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -248,120 +248,6 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) } } -static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, - const char *name) -{ - int i; - struct intel_vgpu_type *t; - const char *driver_name = dev_driver_string( - &gvt->dev_priv->drm.pdev->dev); - - for (i = 0; i < gvt->num_types; i++) { - t = &gvt->types[i]; - if (!strncmp(t->name, name + strlen(driver_name) + 1, - sizeof(t->name))) - return t; - } - - return NULL; -} - -static ssize_t available_instances_show(struct kobject *kobj, - struct device *dev, char *buf) -{ - struct intel_vgpu_type *type; - unsigned int num = 0; - void *gvt = kdev_to_i915(dev)->gvt; - - type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); - if (!type) - num = 0; - else - num = type->avail_instance; - - return sprintf(buf, "%u\n", num); -} - -static ssize_t device_api_show(struct kobject *kobj, struct device *dev, - char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); -} - -static ssize_t description_show(struct kobject *kobj, struct device *dev, - char *buf) -{ - struct intel_vgpu_type *type; - void *gvt = kdev_to_i915(dev)->gvt; - - type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); - if (!type) - return 0; - - return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" - "fence: %d\nresolution: %s\n" - "weight: %d\n", - BYTES_TO_MB(type->low_gm_size), - BYTES_TO_MB(type->high_gm_size), - type->fence, vgpu_edid_str(type->resolution), - type->weight); -} - -static MDEV_TYPE_ATTR_RO(available_instances); -static MDEV_TYPE_ATTR_RO(device_api); -static MDEV_TYPE_ATTR_RO(description); - -static struct attribute *type_attrs[] = { - &mdev_type_attr_available_instances.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_description.attr, - NULL, -}; - -static struct attribute_group *intel_vgpu_type_groups[] = { - [0 ... 
NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, -}; - -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) -{ - int i, j; - struct intel_vgpu_type *type; - struct attribute_group *group; - - for (i = 0; i < gvt->num_types; i++) { - type = &gvt->types[i]; - - group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); - if (WARN_ON(!group)) - goto unwind; - - group->name = type->name; - group->attrs = type_attrs; - intel_vgpu_type_groups[i] = group; - } - - return true; - -unwind: - for (j = 0; j < i; j++) { - group = intel_vgpu_type_groups[j]; - kfree(group); - } - - return false; -} - -static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) -{ - int i; - struct attribute_group *group; - - for (i = 0; i < gvt->num_types; i++) { - group = intel_vgpu_type_groups[i]; - kfree(group); - } -} - static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) { hash_init(info->ptable); @@ -441,7 +327,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; - type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj)); if (!type) { gvt_vgpu_err("failed to find type %s to create\n", kobject_name(kobj)); @@ -1212,8 +1098,7 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static const struct mdev_parent_ops intel_vgpu_ops = { - .supported_type_groups = intel_vgpu_type_groups, +static struct mdev_parent_ops intel_vgpu_ops = { .mdev_attr_groups = intel_vgpu_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1229,17 +1114,20 @@ static const struct mdev_parent_ops intel_vgpu_ops = { static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) { - if (!intel_gvt_init_vgpu_type_groups(gvt)) - return -EFAULT; + struct attribute **kvm_type_attrs; + struct attribute_group **kvm_vgpu_type_groups; intel_gvt_ops = ops; + if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs, + &kvm_vgpu_type_groups)) + return -EFAULT; + intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups; return mdev_register_device(dev, &intel_vgpu_ops); } static void kvmgt_host_exit(struct device *dev, void *gvt) { - intel_gvt_cleanup_vgpu_type_groups(gvt); mdev_unregister_device(dev); } From bc7b0be316aebac42eb9e8e54c984609555944da Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 26 Sep 2017 16:19:13 +0800 Subject: [PATCH 26/50] drm/i915/gvt: Add basic debugfs infrastructure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need debugfs entry to expose some debug information of gvt and vGPUs. The first tool will be added is mmio-diff, which help to find the difference values of host and vGPU mmio. It's useful for platform enabling. This patch just add a basic debugfs infrastructure, each vGPU has its own sub-folder. Two simple attributes are created as a template. . 
├── num_tracked_mmio ├── vgpu1 | └── active └── vgpu2 └── active Signed-off-by: Changbin Du Reviewed-by: Zhi Wang Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/Makefile | 2 +- drivers/gpu/drm/i915/gvt/debugfs.c | 97 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/gvt.c | 5 ++ drivers/gpu/drm/i915/gvt/gvt.h | 12 +++- drivers/gpu/drm/i915/gvt/handlers.c | 2 - drivers/gpu/drm/i915/gvt/vgpu.c | 7 +++ 6 files changed, 121 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/i915/gvt/debugfs.c diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index f5486cb94818c..7100240a4a81a 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,7 +1,7 @@ GVT_DIR := gvt GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ - execlist.o scheduler.o sched_policy.o render.o cmd_parser.o + execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c new file mode 100644 index 0000000000000..02acd58426055 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -0,0 +1,97 @@ +/* + * Copyright(c) 2011-2017 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include "i915_drv.h" +#include "gvt.h" + + +/** + * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed. + */ +int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) +{ + struct dentry *ent; + char name[10] = ""; + + sprintf(name, "vgpu%d", vgpu->id); + vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); + if (!vgpu->debugfs) + return -ENOMEM; + + ent = debugfs_create_bool("active", 0444, vgpu->debugfs, + &vgpu->active); + if (!ent) + return -ENOMEM; + + return 0; +} + +/** + * intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU + * @vgpu: a vGPU + */ +void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) +{ + debugfs_remove_recursive(vgpu->debugfs); + vgpu->debugfs = NULL; +} + +/** + * intel_gvt_debugfs_init - register gvt debugfs root entry + * @gvt: GVT device + * + * Returns: + * zero on success, negative if failed. 
+ */ +int intel_gvt_debugfs_init(struct intel_gvt *gvt) +{ + struct drm_minor *minor = gvt->dev_priv->drm.primary; + struct dentry *ent; + + gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); + if (!gvt->debugfs_root) { + gvt_err("Cannot create debugfs dir\n"); + return -ENOMEM; + } + + ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, + &gvt->mmio.num_tracked_mmio); + if (!ent) + return -ENOMEM; + + return 0; +} + +/** + * intel_gvt_debugfs_clean - remove debugfs entries + * @gvt: GVT device + */ +void intel_gvt_debugfs_clean(struct intel_gvt *gvt) +{ + debugfs_remove_recursive(gvt->debugfs_root); + gvt->debugfs_root = NULL; +} diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index ed313811fa761..cef06d59884e3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -318,6 +318,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) if (WARN_ON(!gvt)) return; + intel_gvt_debugfs_clean(gvt); clean_service_thread(gvt); intel_gvt_clean_cmd_parser(gvt); intel_gvt_clean_sched_policy(gvt); @@ -441,6 +442,10 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) } gvt->idle_vgpu = vgpu; + ret = intel_gvt_debugfs_init(gvt); + if (ret) + gvt_err("debugfs registeration failed, go on.\n"); + gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; return 0; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 35c8d87151ddf..f08d194f639fa 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -190,6 +190,8 @@ struct intel_vgpu { struct intel_vgpu_display display; struct intel_vgpu_submission submission; + struct dentry *debugfs; + #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { struct mdev_device *mdev; @@ -253,7 +255,7 @@ struct intel_gvt_mmio { unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); - unsigned int num_tracked_mmio; + unsigned long num_tracked_mmio; }; struct intel_gvt_firmware { @@ -301,6 +303,8 @@ struct intel_gvt { struct task_struct *service_thread; wait_queue_head_t service_thread_wq; unsigned long service_request; + + struct dentry *debugfs_root; }; static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) @@ -619,6 +623,12 @@ static inline bool intel_gvt_mmio_has_mode_mask( return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; } +int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); +int intel_gvt_debugfs_init(struct intel_gvt *gvt); +void intel_gvt_debugfs_clean(struct intel_gvt *gvt); + + #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 5438dafea7a49..408600befc9f4 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2928,8 +2928,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) gvt->mmio.mmio_block = mmio_blocks; gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); - gvt_dbg_mmio("traced %u virtual mmio registers\n", - gvt->mmio.num_tracked_mmio); return 0; err: intel_gvt_clean_mmio_info(gvt); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 841b95c6c2313..99dbefadfe919 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -252,6 +252,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) WARN(vgpu->active, "vGPU is still active!\n"); + 
intel_gvt_debugfs_remove_vgpu(vgpu); idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); @@ -378,10 +379,16 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_submission; + ret = intel_gvt_debugfs_add_vgpu(vgpu); + if (ret) + goto out_clean_sched_policy; + mutex_unlock(&gvt->lock); return vgpu; +out_clean_sched_policy: + intel_vgpu_clean_sched_policy(vgpu); out_clean_submission: intel_vgpu_clean_submission(vgpu); out_clean_display: From 5e86ccefa3c4e2d12acd7f5fc417ea120edbca21 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 26 Sep 2017 15:02:21 +0800 Subject: [PATCH 27/50] drm/i915/gvt: Use BIT() to make klockwork happy Replace the plain bit usage with BIT() to make klockwork happy. Cc: Deng Hongyi Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2801d70579d8c..77da3ecfc4cc6 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -351,7 +351,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) return false; e->type = get_entry_type(e->type); - if (!(e->val64 & (1 << 7))) + if (!(e->val64 & BIT(7))) return false; e->type = get_pse_type(e->type); @@ -369,12 +369,12 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) return (e->val64 != 0); else - return (e->val64 & (1 << 0)); + return (e->val64 & BIT(0)); } static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) { - e->val64 &= ~(1 << 0); + e->val64 &= ~BIT(0); } /* From 58facf8c46160706ccb3b7b3768c0f0e29407548 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Fri, 22 Sep 2017 21:12:03 +0800 Subject: [PATCH 28/50] drm/i915/gvt: Refine find_bb_size() Returns the error code if something is wrong and the size of batch buffer is passed through the pointer. 
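For reference, the reworked calling convention (a condensed sketch of the
perform_bb_shadow() call site in the diff below, not an additional change;
all names are taken from that diff):

    unsigned long bb_size;
    int ret;

    /* the error code now comes back as the return value ... */
    ret = find_bb_size(s, &bb_size);
    if (ret)
        return ret;
    /* ... and the batch buffer size is passed back through the pointer */
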
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 30 +++++++++++++-------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index cd2681a23fcd5..3355eb4fab753 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1594,23 +1594,23 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) return 1; } -static int find_bb_size(struct parser_exec_state *s) +static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) { unsigned long gma = 0; struct cmd_info *info; - int bb_size = 0; uint32_t cmd_len = 0; - bool met_bb_end = false; + bool bb_end = false; struct intel_vgpu *vgpu = s->vgpu; u32 cmd; + *bb_size = 0; + /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", @@ -1629,20 +1629,18 @@ static int find_bb_size(struct parser_exec_state *s) } if (info->opcode == OP_MI_BATCH_BUFFER_END) { - met_bb_end = true; + bb_end = true; } else if (info->opcode == OP_MI_BATCH_BUFFER_START) { - if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) { + if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) /* chained batch buffer */ - met_bb_end = true; - } + bb_end = true; } cmd_len = get_cmd_length(info, cmd) << 2; - bb_size += cmd_len; + *bb_size += cmd_len; gma += cmd_len; + } while (!bb_end); - } while (!met_bb_end); - - return bb_size; + return 0; } static int perform_bb_shadow(struct parser_exec_state *s) @@ -1650,7 +1648,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) struct intel_shadow_bb_entry *entry_obj; struct intel_vgpu *vgpu = s->vgpu; unsigned long gma = 0; - int bb_size; + unsigned long bb_size; void *dst = NULL; int ret = 0; @@ -1660,9 +1658,9 @@ static int perform_bb_shadow(struct parser_exec_state *s) return -EFAULT; /* get the size of the batch buffer */ - bb_size = find_bb_size(s); - if (bb_size < 0) - return bb_size; + ret = find_bb_size(s, &bb_size); + if (ret) + return ret; /* allocate shadow batch buffer */ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL); From f52c380a48f527930c86ea6fd7242873c93ba682 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sun, 24 Sep 2017 21:53:03 +0800 Subject: [PATCH 29/50] drm/i915/gvt: Refine shadow batch buffer 1) Use standard i915 GEM object sequence to access the shadow batch buffer. 2) Manage i915 vma life cycle to solve one FIXME. v2: - Refine code structure. - Refine the usage of GEM APIs. - Add the missing lock/unlock in release_shadow_batch_buffer. Test on my SKL NuC. 
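Roughly, the GEM object sequence now used for a shadow batch buffer looks
like this (a condensed sketch of the code added below; error handling,
struct_mutex locking and most of the release path are elided, and locals are
abbreviated):

    /* at scan time: create, prepare for CPU write, map and fill */
    bb->obj = i915_gem_object_create(dev_priv, roundup(bb_size, PAGE_SIZE));
    i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
    bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
    /* clflush if needed, then copy the guest batch buffer into bb->va */

    /* at workload prepare time: pin into GGTT and tie to the request */
    bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
    i915_vma_move_to_active(bb->vma, workload->req, 0);

    /* at release time: unpin the map, unpin/close the vma, drop the object */
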
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 76 +++++++++++------------ drivers/gpu/drm/i915/gvt/scheduler.c | 88 ++++++++++++++++++--------- drivers/gpu/drm/i915/gvt/scheduler.h | 7 ++- 3 files changed, 100 insertions(+), 71 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 3355eb4fab753..0204430836810 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1645,11 +1645,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) static int perform_bb_shadow(struct parser_exec_state *s) { - struct intel_shadow_bb_entry *entry_obj; struct intel_vgpu *vgpu = s->vgpu; + struct intel_vgpu_shadow_bb *bb; unsigned long gma = 0; unsigned long bb_size; - void *dst = NULL; int ret = 0; /* get the start gm address of the batch buffer */ @@ -1657,51 +1656,51 @@ static int perform_bb_shadow(struct parser_exec_state *s) if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; - /* get the size of the batch buffer */ ret = find_bb_size(s, &bb_size); if (ret) return ret; - /* allocate shadow batch buffer */ - entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL); - if (entry_obj == NULL) + bb = kzalloc(sizeof(*bb), GFP_KERNEL); + if (!bb) return -ENOMEM; - entry_obj->obj = - i915_gem_object_create(s->vgpu->gvt->dev_priv, - roundup(bb_size, PAGE_SIZE)); - if (IS_ERR(entry_obj->obj)) { - ret = PTR_ERR(entry_obj->obj); - goto free_entry; + bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv, + roundup(bb_size, PAGE_SIZE)); + if (IS_ERR(bb->obj)) { + ret = PTR_ERR(bb->obj); + goto err_free_bb; } - entry_obj->len = bb_size; - INIT_LIST_HEAD(&entry_obj->list); - dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB); - if (IS_ERR(dst)) { - ret = PTR_ERR(dst); - goto put_obj; - } + ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush); + if (ret) + goto err_free_obj; - ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); - if (ret) { - gvt_vgpu_err("failed to set shadow batch to CPU\n"); - goto unmap_src; + bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB); + if (IS_ERR(bb->va)) { + ret = PTR_ERR(bb->va); + goto err_finish_shmem_access; } - entry_obj->va = dst; - entry_obj->bb_start_cmd_va = s->ip_va; + if (bb->clflush & CLFLUSH_BEFORE) { + drm_clflush_virt_range(bb->va, bb->obj->base.size); + bb->clflush &= ~CLFLUSH_BEFORE; + } - /* copy batch buffer to shadow batch buffer*/ ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm, gma, gma + bb_size, - dst); + bb->va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); - goto unmap_src; + ret = -EFAULT; + goto err_unmap; } - list_add(&entry_obj->list, &s->workload->shadow_bb); + INIT_LIST_HEAD(&bb->list); + list_add(&bb->list, &s->workload->shadow_bb); + + bb->accessing = true; + bb->bb_start_cmd_va = s->ip_va; + /* * ip_va saves the virtual address of the shadow batch buffer, while * ip_gma saves the graphics address of the original batch buffer. @@ -1710,17 +1709,17 @@ static int perform_bb_shadow(struct parser_exec_state *s) * buffer's gma in pair. After all, we don't want to pin the shadow * buffer here (too early). 
*/ - s->ip_va = dst; + s->ip_va = bb->va; s->ip_gma = gma; - return 0; - -unmap_src: - i915_gem_object_unpin_map(entry_obj->obj); -put_obj: - i915_gem_object_put(entry_obj->obj); -free_entry: - kfree(entry_obj); +err_unmap: + i915_gem_object_unpin_map(bb->obj); +err_finish_shmem_access: + i915_gem_obj_finish_shmem_access(bb->obj); +err_free_obj: + i915_gem_object_put(bb->obj); +err_free_bb: + kfree(bb); return ret; } @@ -1762,7 +1761,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) if (ret < 0) return ret; } - return ret; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 391690c0c28c1..f2d4c90ea1d47 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -325,31 +325,46 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return ret; } +static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload); + static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { struct intel_gvt *gvt = workload->vgpu->gvt; const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; - struct intel_shadow_bb_entry *entry_obj; + struct intel_vgpu_shadow_bb *bb; + int ret; - /* pin the gem object to ggtt */ - list_for_each_entry(entry_obj, &workload->shadow_bb, list) { - struct i915_vma *vma; + list_for_each_entry(bb, &workload->shadow_bb, list) { + bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0); + if (IS_ERR(bb->vma)) { + ret = PTR_ERR(bb->vma); + goto err; + } - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); + /* relocate shadow batch buffer */ + bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); + if (gmadr_bytes == 8) + bb->bb_start_cmd_va[2] = 0; - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. - */ + /* No one is going to touch shadow bb from now on. 
*/ + if (bb->clflush & CLFLUSH_AFTER) { + drm_clflush_virt_range(bb->va, bb->obj->base.size); + bb->clflush &= ~CLFLUSH_AFTER; + } - /* update the relocate gma with shadow batch buffer*/ - entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); - if (gmadr_bytes == 8) - entry_obj->bb_start_cmd_va[2] = 0; + ret = i915_gem_object_set_to_gtt_domain(bb->obj, false); + if (ret) + goto err; + + i915_gem_obj_finish_shmem_access(bb->obj); + bb->accessing = false; + + i915_vma_move_to_active(bb->vma, workload->req, 0); } return 0; +err: + release_shadow_batch_buffer(workload); + return ret; } static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) @@ -410,22 +425,37 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - /* release all the shadow batch buffer */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; - - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - i915_gem_object_unpin_map(entry_obj->obj); - i915_gem_object_put(entry_obj->obj); - list_del(&entry_obj->list); - kfree(entry_obj); + struct intel_vgpu *vgpu = workload->vgpu; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_vgpu_shadow_bb *bb, *pos; + + if (list_empty(&workload->shadow_bb)) + return; + + bb = list_first_entry(&workload->shadow_bb, + struct intel_vgpu_shadow_bb, list); + + mutex_lock(&dev_priv->drm.struct_mutex); + + list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { + if (bb->obj) { + if (bb->accessing) + i915_gem_obj_finish_shmem_access(bb->obj); + + if (bb->va && !IS_ERR(bb->va)) + i915_gem_object_unpin_map(bb->obj); + + if (bb->vma && !IS_ERR(bb->vma)) { + i915_vma_unpin(bb->vma); + i915_vma_close(bb->vma); + } + __i915_gem_object_release_unless_active(bb->obj); } + list_del(&bb->list); + kfree(bb); } + + mutex_unlock(&dev_priv->drm.struct_mutex); } static int prepare_workload(struct intel_vgpu_workload *workload) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index e0b5730a30187..e4a9f9acd4a95 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -112,13 +112,14 @@ struct intel_vgpu_workload { struct intel_shadow_wa_ctx wa_ctx; }; -/* Intel shadow batch buffer is a i915 gem object */ -struct intel_shadow_bb_entry { +struct intel_vgpu_shadow_bb { struct list_head list; struct drm_i915_gem_object *obj; + struct i915_vma *vma; void *va; - unsigned long len; u32 *bb_start_cmd_va; + unsigned int clflush; + bool accessing; }; #define workload_q_head(vgpu, ring_id) \ From 7d1e5cdf01789729aff2da4005f51f58b491040c Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Fri, 29 Sep 2017 02:47:55 +0800 Subject: [PATCH 30/50] drm/i915/gvt: Factor intel_vgpu_page_track As the data structure of "intel_vgpu_guest_page" will become much heavier in future, it's better to factor out the guest memory page track mechnisim as early as possible. 
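The resulting split, as defined in the gtt.h hunk below (repeated here only
for readability): the generic write-protect tracking state moves into its own
structure, and the PPGTT guest page simply embeds it.

    struct intel_vgpu_page_track {
        struct hlist_node node;
        bool tracked;
        unsigned long gfn;
        int (*handler)(void *, u64, void *, int);
        void *data;
    };

    struct intel_vgpu_guest_page {
        struct intel_vgpu_page_track track;
        unsigned long write_cnt;
        struct intel_vgpu_oos_page *oos_page;
    };
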
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 154 ++++++++++++++++++-------------- drivers/gpu/drm/i915/gvt/gtt.h | 30 +++---- drivers/gpu/drm/i915/gvt/mmio.c | 32 +++---- drivers/gpu/drm/i915/gvt/mpt.h | 34 +++---- 4 files changed, 136 insertions(+), 114 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 77da3ecfc4cc6..bd3dc209cd892 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -494,7 +494,7 @@ static inline int ppgtt_spt_get_entry( return -EINVAL; ret = ops->get_entry(page_table, e, index, guest, - spt->guest_page.gfn << GTT_PAGE_SHIFT, + spt->guest_page.track.gfn << GTT_PAGE_SHIFT, spt->vgpu); if (ret) return ret; @@ -516,7 +516,7 @@ static inline int ppgtt_spt_set_entry( return -EINVAL; return ops->set_entry(page_table, e, index, guest, - spt->guest_page.gfn << GTT_PAGE_SHIFT, + spt->guest_page.track.gfn << GTT_PAGE_SHIFT, spt->vgpu); } @@ -537,86 +537,101 @@ static inline int ppgtt_spt_set_entry( spt->shadow_page.type, e, index, false) /** - * intel_vgpu_init_guest_page - init a guest page data structure + * intel_vgpu_init_page_track - init a page track data structure * @vgpu: a vGPU - * @p: a guest page data structure + * @t: a page track data structure * @gfn: guest memory page frame number - * @handler: function will be called when target guest memory page has + * @handler: the function will be called when target guest memory page has * been modified. * - * This function is called when user wants to track a guest memory page. + * This function is called when a user wants to prepare a page track data + * structure to track a guest memory page. * * Returns: * Zero on success, negative error code if failed. */ -int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *p, +int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t, unsigned long gfn, int (*handler)(void *, u64, void *, int), void *data) { - INIT_HLIST_NODE(&p->node); + INIT_HLIST_NODE(&t->node); - p->writeprotection = false; - p->gfn = gfn; - p->handler = handler; - p->data = data; - p->oos_page = NULL; - p->write_cnt = 0; + t->tracked = false; + t->gfn = gfn; + t->handler = handler; + t->data = data; - hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn); + hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn); return 0; } -static int detach_oos_page(struct intel_vgpu *vgpu, - struct intel_vgpu_oos_page *oos_page); - /** - * intel_vgpu_clean_guest_page - release the resource owned by guest page data - * structure + * intel_vgpu_clean_page_track - release a page track data structure * @vgpu: a vGPU - * @p: a tracked guest page + * @t: a page track data structure * - * This function is called when user tries to stop tracking a guest memory - * page. + * This function is called before a user frees a page track data structure. */ -void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *p) +void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t) { - if (!hlist_unhashed(&p->node)) - hash_del(&p->node); + if (!hlist_unhashed(&t->node)) + hash_del(&t->node); - if (p->oos_page) - detach_oos_page(vgpu, p->oos_page); - - if (p->writeprotection) - intel_gvt_hypervisor_unset_wp_page(vgpu, p); + if (t->tracked) + intel_gvt_hypervisor_disable_page_track(vgpu, t); } /** - * intel_vgpu_find_guest_page - find a guest page data structure by GFN. 
+ * intel_vgpu_find_tracked_page - find a tracked guest page * @vgpu: a vGPU * @gfn: guest memory page frame number * - * This function is called when emulation logic wants to know if a trapped GFN - * is a tracked guest page. + * This function is called when the emulation layer wants to figure out if a + * trapped GFN is a tracked guest page. * * Returns: - * Pointer to guest page data structure, NULL if failed. + * Pointer to page track data structure, NULL if not found. */ -struct intel_vgpu_guest_page *intel_vgpu_find_guest_page( +struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( struct intel_vgpu *vgpu, unsigned long gfn) { - struct intel_vgpu_guest_page *p; + struct intel_vgpu_page_track *t; - hash_for_each_possible(vgpu->gtt.guest_page_hash_table, - p, node, gfn) { - if (p->gfn == gfn) - return p; + hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table, + t, node, gfn) { + if (t->gfn == gfn) + return t; } return NULL; } +static int init_guest_page(struct intel_vgpu *vgpu, + struct intel_vgpu_guest_page *p, + unsigned long gfn, + int (*handler)(void *, u64, void *, int), + void *data) +{ + p->oos_page = NULL; + p->write_cnt = 0; + + return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data); +} + +static int detach_oos_page(struct intel_vgpu *vgpu, + struct intel_vgpu_oos_page *oos_page); + +static void clean_guest_page(struct intel_vgpu *vgpu, + struct intel_vgpu_guest_page *p) +{ + if (p->oos_page) + detach_oos_page(vgpu, p->oos_page); + + intel_vgpu_clean_page_track(vgpu, &p->track); +} + static inline int init_shadow_page(struct intel_vgpu *vgpu, struct intel_vgpu_shadow_page *p, int type) { @@ -664,6 +679,9 @@ static inline struct intel_vgpu_shadow_page *find_shadow_page( return NULL; } +#define page_track_to_guest_page(ptr) \ + container_of(ptr, struct intel_vgpu_guest_page, track) + #define guest_page_to_ppgtt_spt(ptr) \ container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page) @@ -697,7 +715,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); clean_shadow_page(spt->vgpu, &spt->shadow_page); - intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); + clean_guest_page(spt->vgpu, &spt->guest_page); list_del_init(&spt->post_shadow_list); free_spt(spt); @@ -713,22 +731,24 @@ static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu) ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp)); } -static int ppgtt_handle_guest_write_page_table_bytes(void *gp, +static int ppgtt_handle_guest_write_page_table_bytes( + struct intel_vgpu_guest_page *gpt, u64 pa, void *p_data, int bytes); -static int ppgtt_write_protection_handler(void *gp, u64 pa, +static int ppgtt_write_protection_handler(void *data, u64 pa, void *p_data, int bytes) { - struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp; + struct intel_vgpu_page_track *t = data; + struct intel_vgpu_guest_page *p = page_track_to_guest_page(t); int ret; if (bytes != 4 && bytes != 8) return -EINVAL; - if (!gpt->writeprotection) + if (!t->tracked) return -EINVAL; - ret = ppgtt_handle_guest_write_page_table_bytes(gp, + ret = ppgtt_handle_guest_write_page_table_bytes(p, pa, p_data, bytes); if (ret) return ret; @@ -768,7 +788,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( goto err; } - ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, + ret = init_guest_page(vgpu, &spt->guest_page, gfn, ppgtt_write_protection_handler, NULL); if (ret) { gvt_vgpu_err("fail to initialize guest page for 
spt\n"); @@ -856,7 +876,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) int v = atomic_read(&spt->refcount); trace_spt_change(spt->vgpu->id, "die", spt, - spt->guest_page.gfn, spt->shadow_page.type); + spt->guest_page.track.gfn, spt->shadow_page.type); trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); @@ -878,7 +898,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) } release: trace_spt_change(spt->vgpu->id, "release", spt, - spt->guest_page.gfn, spt->shadow_page.type); + spt->guest_page.track.gfn, spt->shadow_page.type); ppgtt_free_shadow_page(spt); return 0; fail: @@ -895,6 +915,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s = NULL; struct intel_vgpu_guest_page *g; + struct intel_vgpu_page_track *t; int ret; if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) { @@ -902,8 +923,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( goto fail; } - g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we)); - if (g) { + t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we)); + if (t) { + g = page_track_to_guest_page(t); s = guest_page_to_ppgtt_spt(g); ppgtt_get_shadow_page(s); } else { @@ -915,7 +937,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( goto fail; } - ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page); + ret = intel_gvt_hypervisor_enable_page_track(vgpu, + &s->guest_page.track); if (ret) goto fail; @@ -923,7 +946,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( if (ret) goto fail; - trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn, + trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn, s->shadow_page.type); } return s; @@ -953,7 +976,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) int ret; trace_spt_change(spt->vgpu->id, "born", spt, - spt->guest_page.gfn, spt->shadow_page.type); + spt->guest_page.track.gfn, spt->shadow_page.type); if (gtt_type_is_pte_pt(spt->shadow_page.type)) { for_each_present_guest_entry(spt, &ge, i) { @@ -1082,7 +1105,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu, index++) { ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); ops->get_entry(NULL, &new, index, true, - oos_page->guest_page->gfn << PAGE_SHIFT, vgpu); + oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu); if (old.val64 == new.val64 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) @@ -1132,8 +1155,9 @@ static int attach_oos_page(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; int ret; - ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT, - oos_page->mem, GTT_PAGE_SIZE); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + gpt->track.gfn << GTT_PAGE_SHIFT, + oos_page->mem, GTT_PAGE_SIZE); if (ret) return ret; @@ -1152,7 +1176,7 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu, { int ret; - ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt); + ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track); if (ret) return ret; @@ -1200,7 +1224,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu, gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head); - return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt); + return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track); } /** @@ -1335,10 +1359,10 @@ int 
intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) return 0; } -static int ppgtt_handle_guest_write_page_table_bytes(void *gp, +static int ppgtt_handle_guest_write_page_table_bytes( + struct intel_vgpu_guest_page *gpt, u64 pa, void *p_data, int bytes) { - struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp; struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; @@ -2032,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_vgpu_mm *ggtt_mm; - hash_init(gtt->guest_page_hash_table); + hash_init(gtt->tracked_guest_page_hash_table); hash_init(gtt->shadow_page_hash_table); INIT_LIST_HEAD(>t->mm_list_head); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 30a4c8d160266..bd9b298b87778 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -193,18 +193,16 @@ struct intel_vgpu_scratch_pt { unsigned long page_mfn; }; - struct intel_vgpu_gtt { struct intel_vgpu_mm *ggtt_mm; unsigned long active_ppgtt_mm_bitmap; struct list_head mm_list_head; DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS); - DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS); - atomic_t n_write_protected_guest_page; + DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS); + atomic_t n_tracked_guest_page; struct list_head oos_page_list_head; struct list_head post_shadow_list_head; struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; - }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); @@ -228,12 +226,16 @@ struct intel_vgpu_shadow_page { unsigned long mfn; }; -struct intel_vgpu_guest_page { +struct intel_vgpu_page_track { struct hlist_node node; - bool writeprotection; + bool tracked; unsigned long gfn; int (*handler)(void *, u64, void *, int); void *data; +}; + +struct intel_vgpu_guest_page { + struct intel_vgpu_page_track track; unsigned long write_cnt; struct intel_vgpu_oos_page *oos_page; }; @@ -258,22 +260,16 @@ struct intel_vgpu_ppgtt_spt { struct list_head post_shadow_list; }; -int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *guest_page, +int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t, unsigned long gfn, int (*handler)(void *gp, u64, void *, int), void *data); -void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *guest_page); - -int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *guest_page); - -void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *guest_page); +void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t); -struct intel_vgpu_guest_page *intel_vgpu_find_guest_page( +struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( struct intel_vgpu *vgpu, unsigned long gfn); int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 1e1310f50289a..4ea0feb5f04dc 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -117,18 +117,18 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, else memcpy(pt, p_data, bytes); - } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { - struct intel_vgpu_guest_page *gp; + } 
else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) { + struct intel_vgpu_page_track *t; /* Since we enter the failsafe mode early during guest boot, * guest may not have chance to set up its ppgtt table, so * there should not be any wp pages for guest. Keep the wp * related code here in case we need to handle it in furture. */ - gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); - if (gp) { + t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT); + if (t) { /* remove write protection to prevent furture traps */ - intel_vgpu_clean_guest_page(vgpu, gp); + intel_vgpu_clean_page_track(vgpu, t); if (read) intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); @@ -170,17 +170,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, return ret; } - if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { - struct intel_vgpu_guest_page *gp; + if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) { + struct intel_vgpu_page_track *t; - gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); - if (gp) { + t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT); + if (t) { ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); if (ret) { gvt_vgpu_err("guest page read error %d, " "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", - ret, gp->gfn, pa, *(u32 *)p_data, + ret, t->gfn, pa, *(u32 *)p_data, bytes); } mutex_unlock(&gvt->lock); @@ -267,17 +267,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, return ret; } - if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { - struct intel_vgpu_guest_page *gp; + if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) { + struct intel_vgpu_page_track *t; - gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); - if (gp) { - ret = gp->handler(gp, pa, p_data, bytes); + t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT); + if (t) { + ret = t->handler(t, pa, p_data, bytes); if (ret) { gvt_err("guest page write error %d, " "gfn 0x%lx, pa 0x%llx, " "var 0x%x, len %d\n", - ret, gp->gfn, pa, + ret, t->gfn, pa, *(u32 *)p_data, bytes); } mutex_unlock(&gvt->lock); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index f0e5487e66886..c436e20ea59ef 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -154,51 +154,53 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p) } /** - * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected + * intel_gvt_hypervisor_enable - set a guest page to write-protected * @vgpu: a vGPU - * @p: intel_vgpu_guest_page + * @t: page track data structure * * Returns: * Zero on success, negative error code if failed. 
*/ -static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *p) +static inline int intel_gvt_hypervisor_enable_page_track( + struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t) { int ret; - if (p->writeprotection) + if (t->tracked) return 0; - ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn); + ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn); if (ret) return ret; - p->writeprotection = true; - atomic_inc(&vgpu->gtt.n_write_protected_guest_page); + t->tracked = true; + atomic_inc(&vgpu->gtt.n_tracked_guest_page); return 0; } /** - * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a + * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a * guest page * @vgpu: a vGPU - * @p: intel_vgpu_guest_page + * @t: page track data structure * * Returns: * Zero on success, negative error code if failed. */ -static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu, - struct intel_vgpu_guest_page *p) +static inline int intel_gvt_hypervisor_disable_page_track( + struct intel_vgpu *vgpu, + struct intel_vgpu_page_track *t) { int ret; - if (!p->writeprotection) + if (!t->tracked) return 0; - ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn); + ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn); if (ret) return ret; - p->writeprotection = false; - atomic_dec(&vgpu->gtt.n_write_protected_guest_page); + t->tracked = false; + atomic_dec(&vgpu->gtt.n_tracked_guest_page); return 0; } From 62a6a53786fc4b4e7543cc63b704dbb3f7df4c0f Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Sat, 30 Sep 2017 17:42:20 +0800 Subject: [PATCH 31/50] drm/i915/gvt: Export intel_gvt_render_mmio_to_ring_id() Since many emulation logic needs to convert the offset of ring registers into ring id, we export it for other caller which might need it. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 21 +++++++++++++++------ drivers/gpu/drm/i915/gvt/mmio.h | 2 ++ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 408600befc9f4..bb16ef5c85104 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt, return 0; } -static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) +/** + * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id + * @gvt: a GVT device + * @offset: register offset + * + * Returns: + * Ring ID on success, negative error code if failed. 
+ */ +int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, + unsigned int offset) { enum intel_engine_id id; struct intel_engine_cs *engine; - reg &= ~GENMASK(11, 0); + offset &= ~GENMASK(11, 0); for_each_engine(engine, gvt->dev_priv, id) { - if (engine->mmio_base == reg) + if (engine->mmio_base == offset) return id; } - return -1; + return -ENODEV; } #define offset_to_fence_num(offset) \ @@ -1445,7 +1454,7 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); + int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; int ret = 0; @@ -1473,7 +1482,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, { struct intel_vgpu_submission *s = &vgpu->submission; u32 data = *(u32 *)p_data; - int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); + int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); bool enable_execlist; int ret; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 32cd64ddad266..dbc04ad2c7a1e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -65,6 +65,8 @@ struct intel_gvt_mmio_info { struct hlist_node node; }; +int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, + unsigned int reg); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); From 9556e118889293f6d5d226b64688ee2adfd8964c Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 10 Oct 2017 13:51:32 +0800 Subject: [PATCH 32/50] drm/i915/gvt: Use I915_GTT_PAGE_SIZE As there is already an I915_GTT_PAGE_SIZE marco in i915, let GVT-g use it as well. Also this patch re-names some GTT marcos with additional prefix. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 13 +++---- drivers/gpu/drm/i915/gvt/gtt.c | 51 ++++++++++++++------------- drivers/gpu/drm/i915/gvt/gtt.h | 7 ++-- drivers/gpu/drm/i915/gvt/reg.h | 3 +- drivers/gpu/drm/i915/gvt/scheduler.c | 14 ++++---- 5 files changed, 45 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 0204430836810..8c8514ee73ac0 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1396,7 +1396,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, } if (index_mode) { - if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) { + if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { ret = -EFAULT; goto err; } @@ -1563,10 +1563,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, return -EFAULT; } - offset = gma & (GTT_PAGE_SIZE - 1); + offset = gma & (I915_GTT_PAGE_SIZE - 1); - copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ? - GTT_PAGE_SIZE - offset : end_gma - gma; + copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ? 
+ I915_GTT_PAGE_SIZE - offset : end_gma - gma; intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len); @@ -2540,7 +2540,7 @@ static int scan_workload(struct intel_vgpu_workload *workload) int ret = 0; /* ring base is page aligned */ - if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE))) + if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE))) return -EINVAL; gma_head = workload->rb_start + workload->rb_head; @@ -2589,7 +2589,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx); /* ring base is page aligned */ - if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE))) + if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, + I915_GTT_PAGE_SIZE))) return -EINVAL; ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index bd3dc209cd892..c9cbd71be4c58 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, u64 h_addr; int ret; - ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT, + ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT, &h_addr); if (ret) return ret; - *h_index = h_addr >> GTT_PAGE_SHIFT; + *h_index = h_addr >> I915_GTT_PAGE_SHIFT; return 0; } @@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, u64 g_addr; int ret; - ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT, + ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT, &g_addr); if (ret) return ret; - *g_index = g_addr >> GTT_PAGE_SHIFT; + *g_index = g_addr >> I915_GTT_PAGE_SHIFT; return 0; } @@ -382,7 +382,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) */ static unsigned long gma_to_ggtt_pte_index(unsigned long gma) { - unsigned long x = (gma >> GTT_PAGE_SHIFT); + unsigned long x = (gma >> I915_GTT_PAGE_SHIFT); trace_gma_index(__func__, gma, x); return x; @@ -494,7 +494,7 @@ static inline int ppgtt_spt_get_entry( return -EINVAL; ret = ops->get_entry(page_table, e, index, guest, - spt->guest_page.track.gfn << GTT_PAGE_SHIFT, + spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, spt->vgpu); if (ret) return ret; @@ -516,7 +516,7 @@ static inline int ppgtt_spt_set_entry( return -EINVAL; return ops->set_entry(page_table, e, index, guest, - spt->guest_page.track.gfn << GTT_PAGE_SHIFT, + spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, spt->vgpu); } @@ -649,7 +649,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu, INIT_HLIST_NODE(&p->node); - p->mfn = daddr >> GTT_PAGE_SHIFT; + p->mfn = daddr >> I915_GTT_PAGE_SHIFT; hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); return 0; } @@ -659,7 +659,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu, { struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; - dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096, + dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096, PCI_DMA_BIDIRECTIONAL); if (!hlist_unhashed(&p->node)) @@ -818,7 +818,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) #define pt_entries(spt) \ - (GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) + (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) #define for_each_present_guest_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); i++) \ @@ -1101,8 +1101,8 @@ static int sync_oos_page(struct intel_vgpu *vgpu, old.type = new.type = 
get_entry_type(spt->guest_page_type); old.val64 = new.val64 = 0; - for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift); - index++) { + for (index = 0; index < (I915_GTT_PAGE_SIZE >> + info->gtt_entry_size_shift); index++) { ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); ops->get_entry(NULL, &new, index, true, oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu); @@ -1156,8 +1156,8 @@ static int attach_oos_page(struct intel_vgpu *vgpu, int ret; ret = intel_gvt_hypervisor_read_gpa(vgpu, - gpt->track.gfn << GTT_PAGE_SHIFT, - oos_page->mem, GTT_PAGE_SIZE); + gpt->track.gfn << I915_GTT_PAGE_SHIFT, + oos_page->mem, I915_GTT_PAGE_SIZE); if (ret) return ret; @@ -1439,7 +1439,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) mm->shadow_page_table = mem + mm->page_table_entry_size; } else if (mm->type == INTEL_GVT_MM_GGTT) { mm->page_table_entry_cnt = - (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT); + (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT); mm->page_table_entry_size = mm->page_table_entry_cnt * info->gtt_entry_size; mem = vzalloc(mm->page_table_entry_size); @@ -1761,8 +1761,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) gma_ops->gma_to_ggtt_pte_index(gma)); if (ret) goto err; - gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT) - + (gma & ~GTT_PAGE_MASK); + gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + + (gma & ~I915_GTT_PAGE_MASK); trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); return gpa; @@ -1814,8 +1814,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) } } - gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT) - + (gma & ~GTT_PAGE_MASK); + gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + + (gma & ~I915_GTT_PAGE_MASK); trace_gma_translate(vgpu->id, "ppgtt", 0, mm->page_table_level, gma, gpa); @@ -1883,7 +1883,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (bytes != 4 && bytes != 8) return -EINVAL; - gma = g_gtt_index << GTT_PAGE_SHIFT; + gma = g_gtt_index << I915_GTT_PAGE_SHIFT; /* the VM may configure the whole GM space when ballooning is used */ if (!vgpu_gmadr_is_valid(vgpu, gma)) @@ -1946,7 +1946,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, { struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - int page_entry_num = GTT_PAGE_SIZE >> + int page_entry_num = I915_GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; int i; @@ -1970,7 +1970,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, return -ENOMEM; } gtt->scratch_pt[type].page_mfn = - (unsigned long)(daddr >> GTT_PAGE_SHIFT); + (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, gtt->scratch_pt[type].page_mfn); @@ -2013,7 +2013,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu) for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { if (vgpu->gtt.scratch_pt[i].page != NULL) { daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << - GTT_PAGE_SHIFT); + I915_GTT_PAGE_SHIFT); dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); __free_page(vgpu->gtt.scratch_pt[i].page); vgpu->gtt.scratch_pt[i].page = NULL; @@ -2310,7 +2310,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENOMEM; } gvt->gtt.scratch_ggtt_page = virt_to_page(page); - gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); + 
gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> + I915_GTT_PAGE_SHIFT); if (enable_out_of_sync) { ret = setup_spt_oos(gvt); @@ -2337,7 +2338,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { struct device *dev = &gvt->dev_priv->drm.pdev->dev; dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << - GTT_PAGE_SHIFT); + I915_GTT_PAGE_SHIFT); dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index bd9b298b87778..80b2deb097ad9 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -34,9 +34,8 @@ #ifndef _GVT_GTT_H_ #define _GVT_GTT_H_ -#define GTT_PAGE_SHIFT 12 -#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT) -#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1)) +#define I915_GTT_PAGE_SHIFT 12 +#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) struct intel_vgpu_mm; @@ -245,7 +244,7 @@ struct intel_vgpu_oos_page { struct list_head list; struct list_head vm_list; int id; - unsigned char mem[GTT_PAGE_SIZE]; + unsigned char mem[I915_GTT_PAGE_SIZE]; }; #define GTT_ENTRY_NUM_IN_ONE_PAGE 512 diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 83f2f63d7eeba..d4f7ce6dc1d73 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -74,6 +74,7 @@ #define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2)) #define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3)) #define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12)) -#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE) +#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \ + I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f2d4c90ea1d47..7a1ffaa9ae06d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -81,7 +81,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << - GTT_PAGE_SHIFT)); + I915_GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Invalid guest context descriptor\n"); return -EFAULT; @@ -90,7 +90,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); dst = kmap(page); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, - GTT_PAGE_SIZE); + I915_GTT_PAGE_SIZE); kunmap(page); i++; } @@ -120,7 +120,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sizeof(*shadow_ring_context), (void *)shadow_ring_context + sizeof(*shadow_ring_context), - GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); + I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); kunmap(page); return 0; @@ -635,7 +635,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << - GTT_PAGE_SHIFT)); + I915_GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("invalid guest context descriptor\n"); return; @@ -644,7 +644,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); src = kmap(page); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - GTT_PAGE_SIZE); + I915_GTT_PAGE_SIZE); kunmap(page); i++; } @@ -669,7 +669,7 @@ static void 
update_guest_context(struct intel_vgpu_workload *workload) sizeof(*shadow_ring_context), (void *)shadow_ring_context + sizeof(*shadow_ring_context), - GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); + I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); kunmap(page); } @@ -1198,7 +1198,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, int ret; ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); + (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT)); if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); return ERR_PTR(-EINVAL); From 22115cef0869b7d08cb2aa60a65d4d978598644c Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 10 Oct 2017 14:34:11 +0800 Subject: [PATCH 33/50] drm/i915/gvt: Let the caller choose if a shadow page should be put into hash table As we want to re-use intel_vgpu_shadow_page in building the scratch page table and we don't want to put the scratch page table page into the hash table, a new parameter is introduced to give the caller a choice to decide if a shadow page should be put into the hash table. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 25 +++++++++++++------------ drivers/gpu/drm/i915/gvt/gtt.h | 4 ++-- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index c9cbd71be4c58..912d63b2ff447 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -633,7 +633,7 @@ static void clean_guest_page(struct intel_vgpu *vgpu, } static inline int init_shadow_page(struct intel_vgpu *vgpu, - struct intel_vgpu_shadow_page *p, int type) + struct intel_vgpu_shadow_page *p, int type, bool hash) { struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; dma_addr_t daddr; @@ -650,7 +650,8 @@ static inline int init_shadow_page(struct intel_vgpu, INIT_HLIST_NODE(&p->node); p->mfn = daddr >> I915_GTT_PAGE_SHIFT; - hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); + if (hash) + hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); return 0; } @@ -782,7 +783,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( * TODO: guest page type may be different with shadow page type, * when we support PSE page in future.
*/ - ret = init_shadow_page(vgpu, &spt->shadow_page, type); + ret = init_shadow_page(vgpu, &spt->shadow_page, type, true); if (ret) { gvt_vgpu_err("fail to initialize shadow page for spt\n"); goto err; @@ -1902,11 +1903,11 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, * update the entry in this situation p2m will fail * settting the shadow entry to point to a scratch page */ - ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); + ops->set_pfn(&m, gvt->gtt.scratch_mfn); } } else { m = e; - ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn); + ops->set_pfn(&m, gvt->gtt.scratch_mfn); } ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); @@ -2309,16 +2310,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) __free_page(virt_to_page(page)); return -ENOMEM; } - gvt->gtt.scratch_ggtt_page = virt_to_page(page); - gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> - I915_GTT_PAGE_SHIFT); + + gvt->gtt.scratch_page = virt_to_page(page); + gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); if (enable_out_of_sync) { ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); - __free_page(gvt->gtt.scratch_ggtt_page); + __free_page(gvt->gtt.scratch_page); return ret; } } @@ -2337,12 +2338,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) void intel_gvt_clean_gtt(struct intel_gvt *gvt) { struct device *dev = &gvt->dev_priv->drm.pdev->dev; - dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << + dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << I915_GTT_PAGE_SHIFT); dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); - __free_page(gvt->gtt.scratch_ggtt_page); + __free_page(gvt->gtt.scratch_page); if (enable_out_of_sync) clean_spt_oos(gvt); @@ -2368,7 +2369,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); e.type = GTT_TYPE_GGTT_PTE; - ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); + ops->set_pfn(&e, gvt->gtt.scratch_mfn); e.val64 |= _PAGE_PRESENT; index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 80b2deb097ad9..ab590278dcf1d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -85,8 +85,8 @@ struct intel_gvt_gtt { struct list_head oos_page_free_list_head; struct list_head mm_lru_list_head; - struct page *scratch_ggtt_page; - unsigned long scratch_ggtt_mfn; + struct page *scratch_page; + unsigned long scratch_mfn; }; enum { From 7422064883398612b5921bf26b15bd9b0dd6c325 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 10 Oct 2017 17:14:13 +0800 Subject: [PATCH 34/50] drm/i915/gvt: Fix a bug of unexpectedly clear scratch page table During a vGPU reset, the scratch page table shouldn't be cleared, what needs to be cleared should be the scratch page. 
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 912d63b2ff447..d47b8b100ac28 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2395,8 +2395,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) */ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) { - int i; - ppgtt_free_all_shadow_page(vgpu); /* Shadow pages are only created when there is no page @@ -2406,11 +2404,4 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); intel_vgpu_reset_ggtt(vgpu); - - /* clear scratch page for security */ - for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { - if (vgpu->gtt.scratch_pt[i].page != NULL) - memset(page_address(vgpu->gtt.scratch_pt[i].page), - 0, PAGE_SIZE); - } } From 054f4eba2a2985b1db43353b7b5ce90e96cf9bb9 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 10 Oct 2017 17:19:30 +0800 Subject: [PATCH 35/50] drm/i915/gvt: Introduce page table type of current level in GTT type enumerations Need to figure out page table type of current level by GTT entry type during getting a scratch page table entry. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d47b8b100ac28..74be5e628310a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -156,13 +156,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, struct gtt_type_table_entry { int entry_type; + int pt_type; int next_pt_type; int pse_entry_type; }; -#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \ +#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \ [type] = { \ .entry_type = e_type, \ + .pt_type = cpt_type, \ .next_pt_type = npt_type, \ .pse_entry_type = pse_type, \ } @@ -170,55 +172,68 @@ struct gtt_type_table_entry { static struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, + GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PML4_ENTRY, + GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY, GTT_TYPE_PPGTT_PML4_ENTRY, + GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDP_ENTRY, + GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY, GTT_TYPE_PPGTT_ROOT_L3_ENTRY, + GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY, GTT_TYPE_PPGTT_PDP_ENTRY, + GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PDE_ENTRY, + GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, + GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_4K_ENTRY, + 
GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, + GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_2M_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY, GTT_TYPE_PPGTT_PDP_ENTRY, + GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE, GTT_TYPE_GGTT_PTE, GTT_TYPE_INVALID, + GTT_TYPE_INVALID, GTT_TYPE_INVALID), }; @@ -227,6 +242,11 @@ static inline int get_next_pt_type(int type) return gtt_type_table[type].next_pt_type; } +static inline int get_pt_type(int type) +{ + return gtt_type_table[type].pt_type; +} + static inline int get_entry_type(int type) { return gtt_type_table[type].entry_type; From 655c64efe36f199bea16f9ba7388c479d5feed5f Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 10 Oct 2017 17:24:26 +0800 Subject: [PATCH 36/50] drm/i915/gvt: Introduce ops->set_present() We need ops->set_present() during generating a new scratch page table entry. Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 6 ++++++ drivers/gpu/drm/i915/gvt/gtt.h | 1 + 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 74be5e628310a..3d6008b116e57 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -397,6 +397,11 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) e->val64 &= ~BIT(0); } +static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) +{ + e->val64 |= BIT(0); +} + /* * Per-platform GMA routines. */ @@ -426,6 +431,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .get_entry = gtt_get_entry64, .set_entry = gtt_set_entry64, .clear_present = gtt_entry_clear_present, + .set_present = gtt_entry_set_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, .get_pfn = gen8_gtt_get_pfn, diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index ab590278dcf1d..f98c1c19b4cb4 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -62,6 +62,7 @@ struct intel_gvt_gtt_pte_ops { struct intel_vgpu *vgpu); bool (*test_present)(struct intel_gvt_gtt_entry *e); void (*clear_present)(struct intel_gvt_gtt_entry *e); + void (*set_present)(struct intel_gvt_gtt_entry *e); bool (*test_pse)(struct intel_gvt_gtt_entry *e); void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn); unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e); From c1802534e5a6ec089e2b951116adfc14bb6dae64 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 17 Oct 2017 02:00:27 +0800 Subject: [PATCH 37/50] drm/i915/gvt: Refine broken PPGTT scratch Refine previously broken PPGTT scratch. Scratch PTE was no correctly handled and also the handling of scratch entries in page table walk was not well organized, which brings gaps of introducing lazy shadow. 
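As a standalone sketch of the intended structure (illustrative only, not the in-tree code): every page table level gets its own scratch table, each entry of a scratch table points at the scratch table one level below, and the bottom level points at the scratch data page, so a table walk that hits a non-present guest entry always resolves to scratch. The level names, the present bit and the use of host pointers as stand-in mfns are assumptions made for the sketch.

#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_PT 512	/* 4 KB page / 8-byte entry */

/* Illustrative 4-level layout: PTE tables at the bottom, PML4 at the top. */
enum { LVL_PTE, LVL_PDE, LVL_PDP, LVL_PML4, LVL_MAX };

static uint64_t scratch_pt[LVL_MAX][ENTRIES_PER_PT];
static const uint64_t scratch_data_pfn = 0xdead0;	/* stand-in for the scratch data page */

/* What a scratch entry of a given level should point at: the scratch data
 * page for PTE tables, otherwise the scratch table one level down. */
static uint64_t scratch_target(int lvl)
{
	return lvl == LVL_PTE ? scratch_data_pfn
			      : (uint64_t)(uintptr_t)scratch_pt[lvl - 1];
}

/* Build the chain bottom-up so that a walk which hits a non-present guest
 * entry keeps descending through scratch tables and ends on scratch data. */
static void build_scratch_chain(void)
{
	for (int lvl = LVL_PTE; lvl < LVL_MAX; lvl++)
		for (int i = 0; i < ENTRIES_PER_PT; i++)
			scratch_pt[lvl][i] = scratch_target(lvl) | 1; /* bit 0 ~ present */
}

static int is_scratch_entry(int lvl, uint64_t e)
{
	return (e & ~1ull) == scratch_target(lvl);
}

int main(void)
{
	build_scratch_chain();
	printf("PML4 scratch entry descends to the scratch PDP: %d\n",
	       is_scratch_entry(LVL_PML4, scratch_pt[LVL_PML4][0]));
	return 0;
}

Keeping a scratch table per level lets the page table walk treat scratch like any other present entry instead of special-casing it at every level.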
Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 196 ++++++++++++++++++--------------- drivers/gpu/drm/i915/gvt/gtt.h | 17 +-- 2 files changed, 112 insertions(+), 101 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 3d6008b116e57..6fa9271e23a56 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -841,20 +841,51 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( return NULL; } +static bool ppgtt_is_scratch_entry(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *e) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + int pt_type = get_pt_type(e->type); + + if (pt_type == GTT_TYPE_INVALID) + return false; + + if (ops->get_pfn(e) == vgpu->gtt.ppgtt_scratch_page[pt_type].mfn) + return true; + + return false; +} + +static void ppgtt_get_scratch_entry(struct intel_vgpu *vgpu, int type, + struct intel_gvt_gtt_entry *e) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + struct intel_vgpu_shadow_page *scratch_page; + int pt_type = get_pt_type(type); + + if (WARN_ON(pt_type == GTT_TYPE_INVALID)) + return; + + scratch_page = &vgpu->gtt.ppgtt_scratch_page[pt_type]; + + e->type = get_entry_type(type); + ops->get_entry(scratch_page->vaddr, e, 0, false, 0, vgpu); +} + #define pt_entry_size_shift(spt) \ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) #define pt_entries(spt) \ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) -#define for_each_present_guest_entry(spt, e, i) \ +#define for_each_guest_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); i++) \ - if (!ppgtt_get_guest_entry(spt, e, i) && \ - spt->vgpu->gvt->gtt.pte_ops->test_present(e)) + if (!ppgtt_get_guest_entry(spt, e, i)) #define for_each_present_shadow_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); i++) \ if (!ppgtt_get_shadow_entry(spt, e, i) && \ + !ppgtt_is_scratch_entry(spt->vgpu, e) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt) @@ -873,18 +904,13 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; - intel_gvt_gtt_type_t cur_pt_type; if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type)))) return -EINVAL; - if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY - && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { - cur_pt_type = get_next_pt_type(e->type) + 1; - if (ops->get_pfn(e) == - vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) - return 0; - } + if (WARN_ON(ppgtt_is_scratch_entry(vgpu, e))) + return -EINVAL; + s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); if (!s) { gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", @@ -997,6 +1023,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; unsigned long i; @@ -1006,22 +1033,34 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) spt->guest_page.track.gfn, spt->shadow_page.type); if (gtt_type_is_pte_pt(spt->shadow_page.type)) { - for_each_present_guest_entry(spt, &ge, i) { - ret = gtt_entry_p2m(vgpu, &ge, &se); - if (ret) - goto fail; + for_each_guest_entry(spt, &ge, i) { + if (ops->test_present(&ge)) { + ret = gtt_entry_p2m(vgpu, &ge, &se); + if (ret) + goto fail; + } 
else { + ppgtt_get_scratch_entry(vgpu, + spt->shadow_page.type, &se); + } ppgtt_set_shadow_entry(spt, &se, i); } return 0; } - for_each_present_guest_entry(spt, &ge, i) { + for_each_guest_entry(spt, &ge, i) { if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { gvt_vgpu_err("GVT doesn't support pse bit now\n"); ret = -EINVAL; goto fail; } + if (!ops->test_present(&ge)) { + ppgtt_get_scratch_entry(vgpu, spt->shadow_page.type, + &se); + ppgtt_set_shadow_entry(spt, &se, i); + continue; + } + s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); if (IS_ERR(s)) { ret = PTR_ERR(s); @@ -1053,7 +1092,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, if (!ops->test_present(se)) return 0; - if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn) + if (ppgtt_is_scratch_entry(vgpu, se)) return 0; if (gtt_type_is_pt(get_next_pt_type(se->type))) { @@ -1292,7 +1331,6 @@ static int ppgtt_handle_guest_write_page_table( { struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); struct intel_vgpu *vgpu = spt->vgpu; - int type = spt->shadow_page.type; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se; @@ -1319,7 +1357,7 @@ static int ppgtt_handle_guest_write_page_table( goto fail; if (!new_present) { - ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_get_scratch_entry(vgpu, spt->shadow_page.type, &se); ppgtt_set_shadow_entry(spt, &se, index); } @@ -1968,106 +2006,85 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; } -static int alloc_scratch_pages(struct intel_vgpu *vgpu, +static void ppgtt_destroy_scratch(struct intel_vgpu *vgpu) +{ + struct intel_vgpu_shadow_page *scratch_page; + int i; + + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + scratch_page = &vgpu->gtt.ppgtt_scratch_page[i]; + if (scratch_page->page != NULL) { + clean_shadow_page(vgpu, scratch_page); + __free_page(scratch_page->page); + } + } +} + +static int setup_ppgtt_scratch_page(struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) { + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_device_info *info = &gvt->device_info; + int num_entries = I915_GTT_PAGE_SIZE >> info->gtt_entry_size_shift; struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - int page_entry_num = I915_GTT_PAGE_SIZE >> - vgpu->gvt->device_info.gtt_entry_size_shift; - void *scratch_pt; - int i; - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - dma_addr_t daddr; + struct intel_vgpu_shadow_page *scratch_page; + struct intel_gvt_gtt_entry e; + intel_gvt_gtt_type_t next_pt_type; + int ret, i; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); - if (!scratch_pt) { + scratch_page = >t->ppgtt_scratch_page[type]; + + scratch_page->page = alloc_page(GFP_KERNEL); + if (!scratch_page) { gvt_vgpu_err("fail to allocate scratch page\n"); return -ENOMEM; } - daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, - 4096, PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, daddr)) { - gvt_vgpu_err("fail to dmamap scratch_pt\n"); - __free_page(virt_to_page(scratch_pt)); + ret = init_shadow_page(vgpu, scratch_page, type, false); + if (ret) { + gvt_vgpu_err("fail to allocate scratch page\n"); + __free_page(scratch_page->page); return -ENOMEM; } - gtt->scratch_pt[type].page_mfn = - (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); - gtt->scratch_pt[type].page = virt_to_page(scratch_pt); - 
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", - vgpu->id, type, gtt->scratch_pt[type].page_mfn); - - /* Build the tree by full filled the scratch pt with the entries which - * point to the next level scratch pt or scratch page. The - * scratch_pt[type] indicate the scratch pt/scratch page used by the - * 'type' pt. - * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self - * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. - */ - if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { - struct intel_gvt_gtt_entry se; - - memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); - se.type = get_entry_type(type - 1); - ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); - - /* The entry parameters like present/writeable/cache type - * set to the same as i915's scratch page tree. - */ - se.val64 |= _PAGE_PRESENT | _PAGE_RW; - if (type == GTT_TYPE_PPGTT_PDE_PT) - se.val64 |= PPAT_CACHED; - - for (i = 0; i < page_entry_num; i++) - ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); - } - return 0; -} - -static int release_scratch_page_tree(struct intel_vgpu *vgpu) -{ - int i; - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - dma_addr_t daddr; + memset(&e, 0, sizeof(e)); - for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { - if (vgpu->gtt.scratch_pt[i].page != NULL) { - daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << - I915_GTT_PAGE_SHIFT); - dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); - __free_page(vgpu->gtt.scratch_pt[i].page); - vgpu->gtt.scratch_pt[i].page = NULL; - vgpu->gtt.scratch_pt[i].page_mfn = 0; - } + if (type == GTT_TYPE_PPGTT_PTE_PT) { + e.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY; + ops->set_pfn(&e, gvt->gtt.scratch_mfn); + } else { + next_pt_type = get_next_pt_type(type); + e.type = get_entry_type(type); + ops->set_pfn(&e, gtt->ppgtt_scratch_page[next_pt_type].mfn); } + ops->set_present(&e); + + for (i = 0; i < num_entries; i++) + ops->set_entry(scratch_page->vaddr, &e, i, false, 0, vgpu); + return 0; } -static int create_scratch_page_tree(struct intel_vgpu *vgpu) +static int ppgtt_create_scratch(struct intel_vgpu *vgpu) { int i, ret; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { - ret = alloc_scratch_pages(vgpu, i); + ret = setup_ppgtt_scratch_page(vgpu, i); if (ret) goto err; } - return 0; - err: - release_scratch_page_tree(vgpu); + ppgtt_destroy_scratch(vgpu); return ret; } - + /** * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virulization * @vgpu: a vGPU @@ -2100,8 +2117,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) } gtt->ggtt_mm = ggtt_mm; - - return create_scratch_page_tree(vgpu); + return ppgtt_create_scratch(vgpu); } static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) @@ -2133,7 +2149,7 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { ppgtt_free_all_shadow_page(vgpu); - release_scratch_page_tree(vgpu); + ppgtt_destroy_scratch(vgpu); intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index f98c1c19b4cb4..416b2f868cf09 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -188,9 +188,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref); struct intel_vgpu_guest_page; -struct intel_vgpu_scratch_pt { +struct intel_vgpu_shadow_page { + void *vaddr; struct page *page; - unsigned 
long page_mfn; + int type; + struct hlist_node node; + unsigned long mfn; }; struct intel_vgpu_gtt { @@ -202,7 +205,7 @@ struct intel_vgpu_gtt { atomic_t n_tracked_guest_page; struct list_head oos_page_list_head; struct list_head post_shadow_list_head; - struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; + struct intel_vgpu_shadow_page ppgtt_scratch_page[GTT_TYPE_MAX]; }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); @@ -218,14 +221,6 @@ extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, struct intel_vgpu_oos_page; -struct intel_vgpu_shadow_page { - void *vaddr; - struct page *page; - int type; - struct hlist_node node; - unsigned long mfn; -}; - struct intel_vgpu_page_track { struct hlist_node node; bool tracked; From a2ae95af9646316aaf86e2d18f46de1a5f746f1a Mon Sep 17 00:00:00 2001 From: Weinan Li Date: Fri, 20 Oct 2017 15:16:46 +0800 Subject: [PATCH 38/50] drm/i915/gvt: update CSB and CSB write pointer in virtual HWSP The engine provides a mirror of the CSB and CSB write pointer in the HWSP. Reading this status from the virtual HWSP in the VM can reduce CPU utilization when applications submit many short GPU workloads. Here we update the corresponding data in the virtual HWSP just as we do in virtual MMIO. Before reading this status from the HWSP in a GVT-g VM, please ensure the host supports it by checking BIT(3) of caps in PVINFO. Virtual HWSP is only supported on GEN8+ platforms; since the HWSP MMIO may change as platforms are updated, please add the corresponding MMIO emulation when enabling new platforms in GVT-g. v3 : Add address audit in HWSP address update. v4 : Separate this patch from enabling virtual HWSP in the VM. Use intel_gvt_render_mmio_to_ring_id() to determine ring_id by offset. v5 : Remove unnecessary check for Gen8, GVT-g only supports Gen8+.
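As a rough illustration of the HWSP layout being emulated here (a standalone sketch, not the driver code; the dword indices are assumptions, the authoritative values are I915_HWS_CSB_BUF0_INDEX and intel_hws_csb_write_index() used in the hunk below):

#include <stdint.h>
#include <stdio.h>

/* Illustrative dword indices inside the 4 KB hardware status page. */
#define CSB_BUF0_DWORD_INDEX	0x10	/* assumed first CSB mirror slot  */
#define CSB_WRITE_PTR_DWORD	0x1f	/* assumed CSB write pointer slot */

/* Byte offset of CSB entry 'n' (two dwords each) inside the HWSP page. */
static uint32_t csb_entry_offset(uint32_t n)
{
	return CSB_BUF0_DWORD_INDEX * 4 + n * 8;
}

/* Byte offset of the mirrored CSB write pointer. */
static uint32_t csb_write_ptr_offset(void)
{
	return CSB_WRITE_PTR_DWORD * 4;
}

int main(void)
{
	for (uint32_t n = 0; n < 6; n++)
		printf("CSB[%u] -> HWSP + 0x%03x\n", n, csb_entry_offset(n));
	printf("write ptr -> HWSP + 0x%03x\n", csb_write_ptr_offset());
	return 0;
}

With the assumed indices, CSB entry 0 lands at HWSP offset 0x40 and the write pointer at 0x7c; the guest driver polls these HWSP locations instead of the CSB MMIO registers.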
Signed-off-by: Weinan Li Cc: Zhenyu Wang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/execlist.c | 16 +++++++++++++++ drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/handlers.c | 30 ++++++++++++++++++++++++++++- drivers/gpu/drm/i915/gvt/vgpu.c | 3 +++ 4 files changed, 49 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 5c966edfeefb6..c9fa0fb488d3a 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -133,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, struct execlist_context_status_pointer_format ctx_status_ptr; u32 write_pointer; u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset; + unsigned long hwsp_gpa; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, _EL_OFFSET_STATUS_PTR); @@ -158,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, ctx_status_ptr.write_ptr = write_pointer; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; + /* Update the CSB and CSB write pointer in HWSP */ + hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + vgpu->hws_pga[ring_id]); + if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { + intel_gvt_hypervisor_write_gpa(vgpu, + hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + + write_pointer * 8, + status, 8); + intel_gvt_hypervisor_write_gpa(vgpu, + hwsp_gpa + + intel_hws_csb_write_index(dev_priv) * 4, + &write_pointer, 4); + } + gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", vgpu->id, write_pointer, offset, status->ldw, status->udw); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index f08d194f639fa..27e8186cbc81a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -189,6 +189,7 @@ struct intel_vgpu { struct intel_vgpu_opregion opregion; struct intel_vgpu_display display; struct intel_vgpu_submission submission; + u32 hws_pga[I915_NUM_ENGINES]; struct dentry *debugfs; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bb16ef5c85104..56ee588377f48 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1380,6 +1380,34 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); } +static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 value = *(u32 *)p_data; + int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); + + if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { + gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n", + vgpu->id, offset, value); + return -EINVAL; + } + /* + * Need to emulate all the HWSP register write to ensure host can + * update the VM CSB status correctly. Here listed registers can + * support BDW, SKL or other platforms with same HWSP registers. 
+ */ + if (unlikely(ring_id < 0 || ring_id > I915_NUM_ENGINES)) { + gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n", + vgpu->id, offset); + return -EINVAL; + } + vgpu->hws_pga[ring_id] = value; + gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n", + vgpu->id, value, offset); + + return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); +} + static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2535,7 +2563,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG - MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); + MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 99dbefadfe919..c6b82d1ba7dee 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -43,7 +43,10 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0; vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0; vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id; + vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; + vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; + vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) = From 4023f301d28fb18aac225fef59592a44b9fd42b3 Mon Sep 17 00:00:00 2001 From: Xiaolin Zhang Date: Fri, 20 Oct 2017 21:52:29 +0800 Subject: [PATCH 39/50] drm/i915/gvt: opregion virtualization for win guest This is an enhanced opregion emulation for Windows guest support, initializing more data members including the opregion header size, version and child device properties for display ports. For simplicity, the child_device_config structure is redefined locally.
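For reference, a minimal standalone sketch of the opregion header fields this patch fills in (a simplified stand-in for the real struct opregion_header, not the full layout; expressing the size field in KB is an assumption consistent with the 8 KB emulated region):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_OPREGION_SIGNATURE "IntelGraphicsMem"
#define SKETCH_MBOX_VBT (1 << 3)	/* VBT mailbox, as MBOX_VBT in the patch */

/* Trimmed-down view of the opregion header; only the members set by this
 * patch are modeled here. */
struct opregion_header_sketch {
	char signature[16];
	uint32_t size;		/* total opregion size, in KB */
	uint32_t opregion_ver;	/* opregion structure version */
	uint32_t mboxes;	/* bitmask of supported mailboxes */
};

static void fill_header(struct opregion_header_sketch *h)
{
	memset(h, 0, sizeof(*h));
	memcpy(h->signature, SKETCH_OPREGION_SIGNATURE, sizeof(h->signature));
	h->size = 8;			/* 8 KB, i.e. two 4 KB pages */
	h->opregion_ver = 0x02000000;	/* advertise opregion 2.0 */
	h->mboxes = SKETCH_MBOX_VBT;	/* only the VBT mailbox is emulated */
}

int main(void)
{
	struct opregion_header_sketch h;

	fill_header(&h);
	printf("%.16s size=%uKB ver=0x%08x mboxes=0x%x\n",
	       h.signature, h.size, h.opregion_ver, h.mboxes);
	return 0;
}

The values mirror what the hunk below writes into the emulated opregion header.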
Signed-off-by: Xiaolin Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/opregion.c | 82 +++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index db8abac5faad4..2533d1ef1c569 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -63,6 +63,60 @@ struct bdb_data_header { u16 size; /* data size */ } __packed; +struct efp_child_device_config { + u16 handle; + u16 device_type; + u16 device_class; + u8 i2c_speed; + u8 dp_onboard_redriver; /* 158 */ + u8 dp_ondock_redriver; /* 158 */ + u8 hdmi_level_shifter_value:4; /* 169 */ + u8 hdmi_max_data_rate:4; /* 204 */ + u16 dtd_buf_ptr; /* 161 */ + u8 edidless_efp:1; /* 161 */ + u8 compression_enable:1; /* 198 */ + u8 compression_method:1; /* 198 */ + u8 ganged_edp:1; /* 202 */ + u8 skip0:4; + u8 compression_structure_index:4; /* 198 */ + u8 skip1:4; + u8 slave_port; /* 202 */ + u8 skip2; + u8 dvo_port; + u8 i2c_pin; /* for add-in card */ + u8 slave_addr; /* for add-in card */ + u8 ddc_pin; + u16 edid_ptr; + u8 dvo_config; + u8 efp_docked_port:1; /* 158 */ + u8 lane_reversal:1; /* 184 */ + u8 onboard_lspcon:1; /* 192 */ + u8 iboost_enable:1; /* 196 */ + u8 hpd_invert:1; /* BXT 196 */ + u8 slip3:3; + u8 hdmi_compat:1; + u8 dp_compat:1; + u8 tmds_compat:1; + u8 skip4:5; + u8 aux_channel; + u8 dongle_detect; + u8 pipe_cap:2; + u8 sdvo_stall:1; /* 158 */ + u8 hpd_status:2; + u8 integrated_encoder:1; + u8 skip5:2; + u8 dvo_wiring; + u8 mipi_bridge_type; /* 171 */ + u16 device_class_ext; + u8 dvo_function; + u8 dp_usb_type_c:1; /* 195 */ + u8 skip6:7; + u8 dp_usb_type_c_2x_gpio_index; /* 195 */ + u16 dp_usb_type_c_2x_gpio_pin; /* 195 */ + u8 iboost_dp:4; /* 196 */ + u8 iboost_hdmi:4; /* 196 */ +} __packed; + struct vbt { /* header->bdb_offset point to bdb_header offset */ struct vbt_header header; @@ -73,10 +127,11 @@ struct vbt { struct bdb_data_header general_definitions_header; struct bdb_general_definitions general_definitions; - struct child_device_config child0; - struct child_device_config child1; - struct child_device_config child2; - struct child_device_config child3; + + struct efp_child_device_config child0; + struct efp_child_device_config child1; + struct efp_child_device_config child2; + struct efp_child_device_config child3; struct bdb_data_header driver_features_header; struct bdb_driver_features driver_features; @@ -100,7 +155,7 @@ static void virt_vbt_generation(struct vbt *v) v->header.bdb_offset = offsetof(struct vbt, bdb_header); strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); - v->bdb_header.version = 198; /* child_dev_size = 38 */ + v->bdb_header.version = 186; /* child_dev_size = 38 */ v->bdb_header.header_size = sizeof(v->bdb_header); v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) @@ -125,24 +180,32 @@ static void virt_vbt_generation(struct vbt *v) v->child0.device_type = DEVICE_TYPE_DP; v->child0.dvo_port = DVO_PORT_DPA; v->child0.aux_channel = DP_AUX_A; + v->child0.dp_compat = true; + v->child0.integrated_encoder = true; /* portB */ v->child1.handle = DEVICE_TYPE_EFP2; v->child1.device_type = DEVICE_TYPE_DP; v->child1.dvo_port = DVO_PORT_DPB; v->child1.aux_channel = DP_AUX_B; + v->child1.dp_compat = true; + v->child1.integrated_encoder = true; /* portC */ v->child2.handle = DEVICE_TYPE_EFP3; v->child2.device_type = DEVICE_TYPE_DP; v->child2.dvo_port = DVO_PORT_DPC; v->child2.aux_channel = DP_AUX_C; + v->child2.dp_compat = true; + 
v->child2.integrated_encoder = true; /* portD */ v->child3.handle = DEVICE_TYPE_EFP4; v->child3.device_type = DEVICE_TYPE_DP; v->child3.dvo_port = DVO_PORT_DPD; v->child3.aux_channel = DP_AUX_D; + v->child3.dp_compat = true; + v->child3.integrated_encoder = true; /* driver features */ v->driver_features_header.id = BDB_DRIVER_FEATURES; @@ -278,22 +341,23 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt) } /* emulated opregion with VBT mailbox only */ - header = (struct opregion_header *)gvt->opregion.opregion_va; + buf = (u8 *)gvt->opregion.opregion_va; + header = (struct opregion_header *)buf; memcpy(header->signature, OPREGION_SIGNATURE, sizeof(OPREGION_SIGNATURE)); + header->size = 0x8; + header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; /* for unknown reason, the value in LID field is incorrect * which block the windows guest, so workaround it by force * setting it to "OPEN" */ - buf = (u8 *)gvt->opregion.opregion_va; buf[INTEL_GVT_OPREGION_CLID] = 0x3; /* emulated vbt from virt vbt generation */ virt_vbt_generation(&v); - memcpy(gvt->opregion.opregion_va + INTEL_GVT_OPREGION_VBT_OFFSET, - &v, sizeof(struct vbt)); + memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt)); return 0; } From 7cb16018f5b4f04bc58a8752bfd11067bafeb552 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Mon, 23 Oct 2017 11:46:43 +0800 Subject: [PATCH 40/50] drm/i915/gvt: Add mmio iterator intel_gvt_for_each_tracked_mmio() This patch add a function intel_gvt_for_each_tracked_mmio() to iterate each tracked mmio. The caller don't be aware of how the tracked mmios are presented internally. v2: remove snapshot_hw_mmio_registers(). Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/firmware.c | 26 ++++++++++------------ drivers/gpu/drm/i915/gvt/handlers.c | 34 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/mmio.h | 4 ++++ 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index a26c1705430eb..a73e1d418c228 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -66,20 +66,23 @@ static struct bin_attribute firmware_attr = { .mmap = NULL, }; -static int expose_firmware_sysfs(struct intel_gvt *gvt) +static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) { struct drm_i915_private *dev_priv = gvt->dev_priv; + + *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset)); + return 0; +} + +static int expose_firmware_sysfs(struct intel_gvt *gvt) +{ struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; - struct intel_gvt_mmio_info *e; - struct gvt_mmio_block *block = gvt->mmio.mmio_block; - int num = gvt->mmio.num_mmio_block; struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i, j; - int ret; + int i, ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); @@ -104,15 +107,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) p = firmware + h->mmio_offset; - hash_for_each(gvt->mmio.mmio_info_table, i, e, node) - *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); - - for (i = 0; i < num; i++, block++) { - for (j = 0; j < block->size; j += 4) - *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) = - I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET( - block->offset) + j)); - } + /* Take a snapshot of hw mmio registers. 
*/ + intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p); memcpy(gvt->firmware.mmio, p, info->mmio_size); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 56ee588377f48..4f8d470dbb3fa 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2971,6 +2971,40 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) return ret; } +/** + * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio + * @gvt: a GVT device + * @handler: the handler + * @data: private data given to handler + * + * Returns: + * Zero on success, negative error code if failed. + */ +int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, + int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), + void *data) +{ + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct intel_gvt_mmio_info *e; + int i, j, ret; + + hash_for_each(gvt->mmio.mmio_info_table, i, e, node) { + ret = handler(gvt, e->offset, data); + if (ret) + return ret; + } + + for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { + for (j = 0; j < block->size; j += 4) { + ret = handler(gvt, + INTEL_GVT_MMIO_OFFSET(block->offset) + j, + data); + if (ret) + return ret; + } + } + return 0; +} /** * intel_vgpu_default_mmio_read - default MMIO read handler diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index dbc04ad2c7a1e..62709ac351cdc 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -72,6 +72,10 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); int intel_gvt_setup_mmio_info(struct intel_gvt *gvt); void intel_gvt_clean_mmio_info(struct intel_gvt *gvt); +int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, + int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), + void *data); + #define INTEL_GVT_MMIO_OFFSET(reg) ({ \ typeof(reg) __reg = reg; \ From cea9083e35b2f771624a18126b10ef1fd7806040 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Mon, 23 Oct 2017 11:46:44 +0800 Subject: [PATCH 41/50] drm/i915/gvt: Add new debugfs tool mmio_diff This new debugfs entry is used to figure out which registers of vGPU is different to host. It is a useful tool for new platform enabling and debugging. When read this entry, all the diff mmio are recognized and sorted by mmio offset. Besides, the bit positions of different value are listed in 'Diff' column. Here is a show: $ sudo cat ./mmio_diff Offset HW vGPU Diff 00002030 000025f8 00000000 3-8,10,13 00002034 012025f8 00000000 3-8,10,13,21,24 00002038 027fb000 00000000 12-13,15-22,25 0000203c 00003000 00000000 12-13 00002054 0000000a 00000040 1,3,6 00002074 012025f8 00000000 3-8,10,13,21,24 00002080 fffe6000 00000000 13-14,17-31 000020a8 fffffeff ffffffff 8 000020d4 00000004 00000000 2 .... 00145974 eb42718c 010c11b0 2-5,13-14,17-19,22,25,27,29-31 00145978 0000002f 0000002a 0,2 0014597c 0000002f 0000002a 0,2 00145980 0000002b 00000028 0-1 00145984 a5a87c9e b27d20c0 1-4,6,10-12,14,16,18,20,22-26,28 001459c0 88390000 883c0000 16,18 00146200 88350000 883a0000 16-19 Total: 72432, Diff: 901 v3: fix a typo. v2: add mmio_hw_access_pre/post(). 
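For reference, the Diff column is just the XOR of the host and vGPU values rendered as a bit range list (the seq_printf() below uses the %*pbl bitmap specifier for that). A standalone sketch of the same formatting:

#include <stdint.h>
#include <stdio.h>

/* Print the set bit positions of 'diff' as a range list, e.g. "3-8,10,13",
 * mimicking what %*pbl produces for the 32-bit XOR in mmio_diff. */
static void print_bit_ranges(uint32_t diff)
{
	int bit = 0, first = 1;

	while (bit < 32) {
		int start;

		if (!(diff & (1u << bit))) {
			bit++;
			continue;
		}
		start = bit;
		while (bit < 32 && (diff & (1u << bit)))
			bit++;
		printf("%s%d", first ? "" : ",", start);
		if (bit - 1 > start)
			printf("-%d", bit - 1);
		first = 0;
	}
	printf("\n");
}

int main(void)
{
	uint32_t hw = 0x000025f8, vreg = 0x00000000;

	/* 0x25f8 ^ 0x0 has bits 3-8, 10 and 13 set, matching the first row. */
	print_bit_ranges(hw ^ vreg);
	return 0;
}

Compiled standalone this prints 3-8,10,13, which is the Diff cell shown for offset 00002030 in the sample output above.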
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/debugfs.c | 115 +++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 02acd58426055..32a66dfdf1127 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -21,9 +21,119 @@ * SOFTWARE. */ #include +#include #include "i915_drv.h" #include "gvt.h" +struct mmio_diff_param { + struct intel_vgpu *vgpu; + int total; + int diff; + struct list_head diff_mmio_list; +}; + +struct diff_mmio { + struct list_head node; + u32 offset; + u32 preg; + u32 vreg; +}; + +/* Compare two diff_mmio items. */ +static int mmio_offset_compare(void *priv, + struct list_head *a, struct list_head *b) +{ + struct diff_mmio *ma; + struct diff_mmio *mb; + + ma = container_of(a, struct diff_mmio, node); + mb = container_of(b, struct diff_mmio, node); + if (ma->offset < mb->offset) + return -1; + else if (ma->offset > mb->offset) + return 1; + return 0; +} + +static inline int mmio_diff_handler(struct intel_gvt *gvt, + u32 offset, void *data) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct mmio_diff_param *param = data; + struct diff_mmio *node; + u32 preg, vreg; + + preg = I915_READ_NOTRACE(_MMIO(offset)); + vreg = vgpu_vreg(param->vgpu, offset); + + if (preg != vreg) { + node = kmalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->offset = offset; + node->preg = preg; + node->vreg = vreg; + list_add(&node->node, ¶m->diff_mmio_list); + param->diff++; + } + param->total++; + return 0; +} + +/* Show the all the different values of tracked mmio. */ +static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) +{ + struct intel_vgpu *vgpu = s->private; + struct intel_gvt *gvt = vgpu->gvt; + struct mmio_diff_param param = { + .vgpu = vgpu, + .total = 0, + .diff = 0, + }; + struct diff_mmio *node, *next; + + INIT_LIST_HEAD(¶m.diff_mmio_list); + + mutex_lock(&gvt->lock); + spin_lock_bh(&gvt->scheduler.mmio_context_lock); + + mmio_hw_access_pre(gvt->dev_priv); + /* Recognize all the diff mmios to list. */ + intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, ¶m); + mmio_hw_access_post(gvt->dev_priv); + + spin_unlock_bh(&gvt->scheduler.mmio_context_lock); + mutex_unlock(&gvt->lock); + + /* In an ascending order by mmio offset. 
*/ + list_sort(NULL, ¶m.diff_mmio_list, mmio_offset_compare); + + seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff"); + list_for_each_entry_safe(node, next, ¶m.diff_mmio_list, node) { + u32 diff = node->preg ^ node->vreg; + + seq_printf(s, "%08x %08x %08x %*pbl\n", + node->offset, node->preg, node->vreg, + 32, &diff); + list_del(&node->node); + kfree(node); + } + seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff); + return 0; +} + +static int vgpu_mmio_diff_open(struct inode *inode, struct file *file) +{ + return single_open(file, vgpu_mmio_diff_show, inode->i_private); +} + +static const struct file_operations vgpu_mmio_diff_fops = { + .open = vgpu_mmio_diff_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU @@ -47,6 +157,11 @@ int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) if (!ent) return -ENOMEM; + ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, + vgpu, &vgpu_mmio_diff_fops); + if (!ent) + return -ENOMEM; + return 0; } From 0bde438b7e2200fc961625ac7b828cbc8c6d78e1 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 26 Oct 2017 15:29:13 +0800 Subject: [PATCH 42/50] MAINTAINERS: Update gvt-linux.git new repo place gvt-linux.git repo is moved for its new place under https://github.com/intel/gvt-linux.git Old https://github.com/01org/gvt-linux.git is set only for redirect now. Signed-off-by: Zhenyu Wang --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 6e1f94b4ed263..9e6cf2209e81a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6928,7 +6928,7 @@ M: Zhi Wang L: intel-gvt-dev@lists.freedesktop.org L: intel-gfx@lists.freedesktop.org W: https://01.org/igvt-g -T: git https://github.com/01org/gvt-linux.git +T: git https://github.com/intel/gvt-linux.git S: Supported F: drivers/gpu/drm/i915/gvt/ From ffe2a503b06cc300e25c1dfc00c85a2240cf97d7 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Mon, 30 Oct 2017 14:19:15 +0800 Subject: [PATCH 43/50] drm/i915/gvt: Reduce rcs mocs switch latency Use I915_WRITE_FW instead of I915_WRITE to reduce overhead. The overall mmio switch latency lowers from ~600us to ~180us. Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/render.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index e16c3551b4a3e..0672178548ef9 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -209,7 +209,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id) offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { gen9_render_mocs[ring_id][i] = I915_READ_FW(offset); - I915_WRITE(offset, vgpu_vreg(vgpu, offset)); + I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset)); offset.reg += 4; } From e4aeba697943692652246a2d0a040f0859d70e86 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 2 Nov 2017 13:33:23 +0800 Subject: [PATCH 44/50] drm/i915/gvt: Don't dump partial state in cmd parser I have seen the cmd parser dump partial odd info. Stop that and only dump the full verbose info when debug enabled. 
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 8c8514ee73ac0..18c45734c7a27 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s) print_opcode(cmd_val(s, 0), s->ring_id); - /* print the whole page to trace */ - pr_err(" ip_va=%p: %08x %08x %08x %08x\n", - s->ip_va, cmd_val(s, 0), cmd_val(s, 1), - cmd_val(s, 2), cmd_val(s, 3)); - s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); while (cnt < 1024) { - pr_err("ip_va=%p: ", s->ip_va); + gvt_dbg_cmd("ip_va=%p: ", s->ip_va); for (i = 0; i < 8; i++) - pr_err("%08x ", cmd_val(s, i)); - pr_err("\n"); + gvt_dbg_cmd("%08x ", cmd_val(s, i)); + gvt_dbg_cmd("\n"); s->ip_va += 8 * sizeof(u32); cnt += 8; From c982c45db6a78e1c23a416785fe920542ae1f33e Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 2 Nov 2017 13:33:32 +0800 Subject: [PATCH 45/50] drm/i915/gvt: Make gvt_vgpu_err use pr_err gvt_vgpu_err means something goes wrong. We need the error propagates to kernel message by default. Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/debug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h index d98d5a0b928c7..c6027125c1ec2 100644 --- a/drivers/gpu/drm/i915/gvt/debug.h +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -30,9 +30,9 @@ #define gvt_vgpu_err(fmt, args...) \ do { \ if (IS_ERR_OR_NULL(vgpu)) \ - pr_debug("gvt: "fmt, ##args); \ + pr_err("gvt: "fmt, ##args); \ else \ - pr_debug("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ + pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ } while (0) #define gvt_dbg_core(fmt, args...) \ From c4270d122ccff963a021d1beb893d6192336af96 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 2 Nov 2017 13:33:42 +0800 Subject: [PATCH 46/50] drm/i915/gvt: Emulate PCI expansion ROM base address register Our vGPU doesn't have a device ROM, we need follow the PCI spec to report this info to drivers. Otherwise, we would see below errors. Inspecting possible rom at 0xfe049000 (vd=8086:1912 bdf=00:10.0) qemu-system-x86_64: vfio-pci: Cannot read device rom at 00000000-0000-0000-0000-000000000001 Device option ROM contents are probably invalid (check dmesg). Skip option ROM probe with rombar=0, or load from file with romfile=No option rom signature (got 4860) I will also send a improvement patch to PCI subsystem related to PCI ROM. But no idea to omit below error, since no pattern to detect vbios shadow without touch its content. 
0000:00:10.0: Invalid PCI ROM header signature: expecting 0xaa55, got 0x0000 Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cfg_space.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index ab19545d59a18..4ce2e6bd06803 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, return 0; } +static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset); + u32 new = *(u32 *)(p_data); + + if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK) + /* We don't have rom, return size of 0. */ + *pval = 0; + else + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); + return 0; +} + static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } switch (rounddown(offset, 4)) { + case PCI_ROM_ADDRESS: + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes); + case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; @@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, pci_resource_len(gvt->dev_priv->drm.pdev, 0); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = pci_resource_len(gvt->dev_priv->drm.pdev, 2); + + memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); } /** From 5c35258de6cfc0829803fe9f9869253a4e600925 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 2 Nov 2017 17:44:52 +0800 Subject: [PATCH 47/50] Revert "drm/i915/gvt: Refine broken PPGTT scratch" This reverts commit b20d09886fd1b74cd2255d846029a049e524db14. This caused windows driver boot errors for invalid page address. Revert for now. 
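As background for the ROM BAR handler added in patch 46 above: it services the standard PCI BAR sizing handshake (write all 1s, read back the decoded mask). A minimal, self-contained sketch of that handshake, with a plain variable standing in for the vGPU's ROM BAR, might look like this:

#include <stdio.h>

#define ROM_ADDRESS_MASK 0xfffff800u	/* address bits 31:11 of the ROM BAR */

static unsigned int rom_bar;	/* the vGPU has no ROM, so sizing must yield 0 */

/* mirrors the emulated write: an all-ones sizing write reads back as 0 */
static void rom_bar_write(unsigned int val)
{
	if ((val & ROM_ADDRESS_MASK) == ROM_ADDRESS_MASK)
		rom_bar = 0;
	else
		rom_bar = val;
}

int main(void)
{
	unsigned int mask;

	rom_bar_write(0xfffff800u);	/* probe: write all 1s to the address bits */
	mask = rom_bar & ROM_ADDRESS_MASK;
	if (!mask)
		printf("no option ROM (size 0), probe skipped\n");
	else
		printf("ROM size: %u bytes\n", ~mask + 1);
	return 0;
}

Reading back 0 tells the guest's PCI core there is no expansion ROM to map, which is exactly the answer a ROM-less function is supposed to give.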
Signed-off-by: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 196 +++++++++++++++------------------ drivers/gpu/drm/i915/gvt/gtt.h | 17 ++- 2 files changed, 101 insertions(+), 112 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6fa9271e23a56..3d6008b116e57 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -841,51 +841,20 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( return NULL; } -static bool ppgtt_is_scratch_entry(struct intel_vgpu *vgpu, - struct intel_gvt_gtt_entry *e) -{ - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - int pt_type = get_pt_type(e->type); - - if (pt_type == GTT_TYPE_INVALID) - return false; - - if (ops->get_pfn(e) == vgpu->gtt.ppgtt_scratch_page[pt_type].mfn) - return true; - - return false; -} - -static void ppgtt_get_scratch_entry(struct intel_vgpu *vgpu, int type, - struct intel_gvt_gtt_entry *e) -{ - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - struct intel_vgpu_shadow_page *scratch_page; - int pt_type = get_pt_type(type); - - if (WARN_ON(pt_type == GTT_TYPE_INVALID)) - return; - - scratch_page = &vgpu->gtt.ppgtt_scratch_page[pt_type]; - - e->type = get_entry_type(type); - ops->get_entry(scratch_page->vaddr, e, 0, false, 0, vgpu); -} - #define pt_entry_size_shift(spt) \ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) #define pt_entries(spt) \ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) -#define for_each_guest_entry(spt, e, i) \ +#define for_each_present_guest_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); i++) \ - if (!ppgtt_get_guest_entry(spt, e, i)) + if (!ppgtt_get_guest_entry(spt, e, i) && \ + spt->vgpu->gvt->gtt.pte_ops->test_present(e)) #define for_each_present_shadow_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); i++) \ if (!ppgtt_get_shadow_entry(spt, e, i) && \ - !ppgtt_is_scratch_entry(spt->vgpu, e) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt) @@ -904,13 +873,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; + intel_gvt_gtt_type_t cur_pt_type; if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type)))) return -EINVAL; - if (WARN_ON(ppgtt_is_scratch_entry(vgpu, e))) - return -EINVAL; - + if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY + && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { + cur_pt_type = get_next_pt_type(e->type) + 1; + if (ops->get_pfn(e) == + vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) + return 0; + } s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); if (!s) { gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", @@ -1023,7 +997,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; unsigned long i; @@ -1033,34 +1006,22 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) spt->guest_page.track.gfn, spt->shadow_page.type); if (gtt_type_is_pte_pt(spt->shadow_page.type)) { - for_each_guest_entry(spt, &ge, i) { - if (ops->test_present(&ge)) { - ret = gtt_entry_p2m(vgpu, &ge, &se); - if (ret) - goto fail; - } else { - ppgtt_get_scratch_entry(vgpu, - spt->shadow_page.type, &se); 
- } + for_each_present_guest_entry(spt, &ge, i) { + ret = gtt_entry_p2m(vgpu, &ge, &se); + if (ret) + goto fail; ppgtt_set_shadow_entry(spt, &se, i); } return 0; } - for_each_guest_entry(spt, &ge, i) { + for_each_present_guest_entry(spt, &ge, i) { if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { gvt_vgpu_err("GVT doesn't support pse bit now\n"); ret = -EINVAL; goto fail; } - if (!ops->test_present(&ge)) { - ppgtt_get_scratch_entry(vgpu, spt->shadow_page.type, - &se); - ppgtt_set_shadow_entry(spt, &se, i); - continue; - } - s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); if (IS_ERR(s)) { ret = PTR_ERR(s); @@ -1092,7 +1053,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, if (!ops->test_present(se)) return 0; - if (ppgtt_is_scratch_entry(vgpu, se)) + if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn) return 0; if (gtt_type_is_pt(get_next_pt_type(se->type))) { @@ -1331,6 +1292,7 @@ static int ppgtt_handle_guest_write_page_table( { struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); struct intel_vgpu *vgpu = spt->vgpu; + int type = spt->shadow_page.type; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se; @@ -1357,7 +1319,7 @@ static int ppgtt_handle_guest_write_page_table( goto fail; if (!new_present) { - ppgtt_get_scratch_entry(vgpu, spt->shadow_page.type, &se); + ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); ppgtt_set_shadow_entry(spt, &se, index); } @@ -2006,85 +1968,106 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; } -static void ppgtt_destroy_scratch(struct intel_vgpu *vgpu) -{ - struct intel_vgpu_shadow_page *scratch_page; - int i; - - for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { - scratch_page = &vgpu->gtt.ppgtt_scratch_page[i]; - if (scratch_page->page != NULL) { - clean_shadow_page(vgpu, scratch_page); - __free_page(scratch_page->page); - } - } -} - -static int setup_ppgtt_scratch_page(struct intel_vgpu *vgpu, +static int alloc_scratch_pages(struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) { - struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_device_info *info = &gvt->device_info; - int num_entries = I915_GTT_PAGE_SIZE >> info->gtt_entry_size_shift; struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - struct intel_vgpu_shadow_page *scratch_page; - struct intel_gvt_gtt_entry e; - intel_gvt_gtt_type_t next_pt_type; - int ret, i; + int page_entry_num = I915_GTT_PAGE_SIZE >> + vgpu->gvt->device_info.gtt_entry_size_shift; + void *scratch_pt; + int i; + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_page = >t->ppgtt_scratch_page[type]; - - scratch_page->page = alloc_page(GFP_KERNEL); - if (!scratch_page) { + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); + if (!scratch_pt) { gvt_vgpu_err("fail to allocate scratch page\n"); return -ENOMEM; } - ret = init_shadow_page(vgpu, scratch_page, type, false); - if (ret) { - gvt_vgpu_err("fail to allocate scratch page\n"); - __free_page(scratch_page->page); + daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, + 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, daddr)) { + gvt_vgpu_err("fail to dmamap scratch_pt\n"); + __free_page(virt_to_page(scratch_pt)); return -ENOMEM; } - - memset(&e, 0, sizeof(e)); - - if (type == GTT_TYPE_PPGTT_PTE_PT) { - e.type = 
GTT_TYPE_PPGTT_PTE_4K_ENTRY; - ops->set_pfn(&e, gvt->gtt.scratch_mfn); - } else { - next_pt_type = get_next_pt_type(type); - e.type = get_entry_type(type); - ops->set_pfn(&e, gtt->ppgtt_scratch_page[next_pt_type].mfn); + gtt->scratch_pt[type].page_mfn = + (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); + gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", + vgpu->id, type, gtt->scratch_pt[type].page_mfn); + + /* Build the tree by full filled the scratch pt with the entries which + * point to the next level scratch pt or scratch page. The + * scratch_pt[type] indicate the scratch pt/scratch page used by the + * 'type' pt. + * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self + * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. + */ + if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { + struct intel_gvt_gtt_entry se; + + memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); + se.type = get_entry_type(type - 1); + ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); + + /* The entry parameters like present/writeable/cache type + * set to the same as i915's scratch page tree. + */ + se.val64 |= _PAGE_PRESENT | _PAGE_RW; + if (type == GTT_TYPE_PPGTT_PDE_PT) + se.val64 |= PPAT_CACHED; + + for (i = 0; i < page_entry_num; i++) + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } - ops->set_present(&e); + return 0; +} + +static int release_scratch_page_tree(struct intel_vgpu *vgpu) +{ + int i; + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; - for (i = 0; i < num_entries; i++) - ops->set_entry(scratch_page->vaddr, &e, i, false, 0, vgpu); + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + if (vgpu->gtt.scratch_pt[i].page != NULL) { + daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << + I915_GTT_PAGE_SHIFT); + dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + __free_page(vgpu->gtt.scratch_pt[i].page); + vgpu->gtt.scratch_pt[i].page = NULL; + vgpu->gtt.scratch_pt[i].page_mfn = 0; + } + } return 0; } -static int ppgtt_create_scratch(struct intel_vgpu *vgpu) +static int create_scratch_page_tree(struct intel_vgpu *vgpu) { int i, ret; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { - ret = setup_ppgtt_scratch_page(vgpu, i); + ret = alloc_scratch_pages(vgpu, i); if (ret) goto err; } + return 0; + err: - ppgtt_destroy_scratch(vgpu); + release_scratch_page_tree(vgpu); return ret; } - + /** * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virulization * @vgpu: a vGPU @@ -2117,7 +2100,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) } gtt->ggtt_mm = ggtt_mm; - return ppgtt_create_scratch(vgpu); + + return create_scratch_page_tree(vgpu); } static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) @@ -2149,7 +2133,7 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { ppgtt_free_all_shadow_page(vgpu); - ppgtt_destroy_scratch(vgpu); + release_scratch_page_tree(vgpu); intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 416b2f868cf09..f98c1c19b4cb4 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -188,12 +188,9 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref); struct intel_vgpu_guest_page; -struct intel_vgpu_shadow_page { - void *vaddr; 
+struct intel_vgpu_scratch_pt { struct page *page; - int type; - struct hlist_node node; - unsigned long mfn; + unsigned long page_mfn; }; struct intel_vgpu_gtt { @@ -205,7 +202,7 @@ struct intel_vgpu_gtt { atomic_t n_tracked_guest_page; struct list_head oos_page_list_head; struct list_head post_shadow_list_head; - struct intel_vgpu_shadow_page ppgtt_scratch_page[GTT_TYPE_MAX]; + struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); @@ -221,6 +218,14 @@ extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, struct intel_vgpu_oos_page; +struct intel_vgpu_shadow_page { + void *vaddr; + struct page *page; + int type; + struct hlist_node node; + unsigned long mfn; +}; + struct intel_vgpu_page_track { struct hlist_node node; bool tracked; From 295764cd2ff41e2c1bc8af4050de77cec5e7a1c0 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Tue, 7 Nov 2017 05:23:02 +0800 Subject: [PATCH 48/50] drm/i915/gvt: Limit read hw reg to active vgpu mmio_read_from_hw() let vgpu could read hw reg, if vgpu's workload is running on hw, things is good. Otherwise vgpu will get other vgpu's reg val, it is unsafe. This patch limit such hw access to active vgpu. If vgpu isn't running on hw, the reg read of this vgpu will get the last active val which saved at schedule_out. v2: ring timestamp is walking continuously even if the ring is idle. so read hw directly. (Zhenyu) Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 26 ++++++++++++++++++++++---- drivers/gpu/drm/i915/gvt/scheduler.c | 15 +++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4f8d470dbb3fa..880448d4adc7e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1471,11 +1471,29 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; + int ring_id; + u32 ring_base; + + ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset); + /** + * Read HW reg in following case + * a. the offset isn't a ring mmio + * b. the offset's ring is running on hw. + * c. 
the offset is ring time stamp mmio + */ + if (ring_id >= 0) + ring_base = dev_priv->engine[ring_id]->mmio_base; + + if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] || + offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) || + offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) { + mmio_hw_access_pre(dev_priv); + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); + mmio_hw_access_post(dev_priv); + } - mmio_hw_access_pre(dev_priv); - vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); - mmio_hw_access_post(dev_priv); return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 7a1ffaa9ae06d..9749113fccdd5 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req) return i915_gem_context_force_single_submission(req->ctx); } +static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + u32 ring_base = dev_priv->engine[ring_id]->mmio_base; + i915_reg_t reg; + + reg = RING_INSTDONE(ring_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); + reg = RING_ACTHD(ring_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); + reg = RING_ACTHD_UDW(ring_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); +} + static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) { @@ -175,6 +189,7 @@ static int shadow_context_status_change(struct notifier_block *nb, break; case INTEL_CONTEXT_SCHEDULE_OUT: case INTEL_CONTEXT_SCHEDULE_PREEMPTED: + save_ring_hw_state(workload->vgpu, ring_id); atomic_set(&workload->shadow_ctx_active, 0); break; default: From b2d6ef70614e9e9dacfa9fc7dac49e7dc22dc8b3 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Wed, 8 Nov 2017 00:45:21 +0800 Subject: [PATCH 49/50] drm/i915/gvt: Let each vgpu has separate opregion memory Currently every vgpu share a common gvt opregion memory, but it is freed at vgpu destroy, then the later vgpu doesn't have opregion memory once the first vgpu is destroyed. This cause guest function failure like reboot, second or later boot. This patch allocate and init virt opregion memory for each vgpu, so this memory could be freed at vgpu destroy. 
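The lifecycle change described above can be sketched in a few lines of self-contained C: each instance owns its own opregion buffer, so destroying one instance cannot leave the others pointing at freed memory. The fake_vgpu names and the buffer size below are illustrative only, not the driver's:

#include <stdlib.h>

#define FAKE_OPREGION_SZ (8 * 1024)	/* stand-in size for the sketch */

struct fake_vgpu {
	void *opregion_va;	/* private per-instance copy */
};

static int fake_vgpu_init(struct fake_vgpu *v)
{
	v->opregion_va = calloc(1, FAKE_OPREGION_SZ);
	return v->opregion_va ? 0 : -1;
}

static void fake_vgpu_destroy(struct fake_vgpu *v)
{
	free(v->opregion_va);	/* safe: no other instance references this buffer */
	v->opregion_va = NULL;
}

int main(void)
{
	struct fake_vgpu a, b;

	if (fake_vgpu_init(&a))
		return 1;
	if (fake_vgpu_init(&b)) {
		fake_vgpu_destroy(&a);
		return 1;
	}
	fake_vgpu_destroy(&a);	/* b is unaffected; a single shared buffer would now be gone */
	fake_vgpu_destroy(&b);
	return 0;
}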
Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 9 +-- drivers/gpu/drm/i915/gvt/gvt.h | 10 --- drivers/gpu/drm/i915/gvt/opregion.c | 101 +++++++++++----------------- 3 files changed, 42 insertions(+), 78 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index cef06d59884e3..3a74a408a9665 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -323,7 +323,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_clean_cmd_parser(gvt); intel_gvt_clean_sched_policy(gvt); intel_gvt_clean_workload_scheduler(gvt); - intel_gvt_clean_opregion(gvt); intel_gvt_clean_gtt(gvt); intel_gvt_clean_irq(gvt); intel_gvt_clean_mmio_info(gvt); @@ -397,13 +396,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) goto out_clean_irq; - ret = intel_gvt_init_opregion(gvt); - if (ret) - goto out_clean_gtt; - ret = intel_gvt_init_workload_scheduler(gvt); if (ret) - goto out_clean_opregion; + goto out_clean_gtt; ret = intel_gvt_init_sched_policy(gvt); if (ret) @@ -460,8 +455,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) intel_gvt_clean_sched_policy(gvt); out_clean_workload_scheduler: intel_gvt_clean_workload_scheduler(gvt); -out_clean_opregion: - intel_gvt_clean_opregion(gvt); out_clean_gtt: intel_gvt_clean_gtt(gvt); out_clean_irq: diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 27e8186cbc81a..393066726993e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -125,7 +125,6 @@ struct intel_vgpu_irq { struct intel_vgpu_opregion { void *va; u32 gfn[INTEL_GVT_OPREGION_PAGES]; - struct page *pages[INTEL_GVT_OPREGION_PAGES]; }; #define vgpu_opregion(vgpu) (&(vgpu->opregion)) @@ -265,11 +264,6 @@ struct intel_gvt_firmware { bool firmware_loaded; }; -struct intel_gvt_opregion { - void *opregion_va; - u32 opregion_pa; -}; - #define NR_MAX_INTEL_VGPU_TYPES 20 struct intel_vgpu_type { char name[16]; @@ -293,7 +287,6 @@ struct intel_gvt { struct intel_gvt_firmware firmware; struct intel_gvt_irq irq; struct intel_gvt_gtt gtt; - struct intel_gvt_opregion opregion; struct intel_gvt_workload_scheduler scheduler; struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); @@ -511,9 +504,6 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar) PCI_BASE_ADDRESS_MEM_MASK; } -void intel_gvt_clean_opregion(struct intel_gvt *gvt); -int intel_gvt_init_opregion(struct intel_gvt *gvt); - void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 2533d1ef1c569..80720e59723ab 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -213,16 +213,55 @@ static void virt_vbt_generation(struct vbt *v) v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS; } +static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu) +{ + u8 *buf; + struct opregion_header *header; + struct vbt v; + + gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); + if (!vgpu_opregion(vgpu)->va) { + gvt_err("fail to get memory for vgpu virt opregion\n"); + return -ENOMEM; + } + + /* emulated opregion with VBT mailbox only */ + buf = (u8 
*)vgpu_opregion(vgpu)->va; + header = (struct opregion_header *)buf; + memcpy(header->signature, OPREGION_SIGNATURE, + sizeof(OPREGION_SIGNATURE)); + header->size = 0x8; + header->opregion_ver = 0x02000000; + header->mboxes = MBOX_VBT; + + /* for unknown reason, the value in LID field is incorrect + * which block the windows guest, so workaround it by force + * setting it to "OPEN" + */ + buf[INTEL_GVT_OPREGION_CLID] = 0x3; + + /* emulated vbt from virt vbt generation */ + virt_vbt_generation(&v); + memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt)); + + return 0; +} + static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) { - int i; + int i, ret; if (WARN((vgpu_opregion(vgpu)->va), "vgpu%d: opregion has been initialized already.\n", vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = vgpu->gvt->opregion.opregion_va; + ret = alloc_and_init_virt_opregion(vgpu); + if (ret < 0) + return ret; for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; @@ -304,64 +343,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) return 0; } -/** - * intel_gvt_clean_opregion - clean host opergion related stuffs - * @gvt: a GVT device - * - */ -void intel_gvt_clean_opregion(struct intel_gvt *gvt) -{ - free_pages((unsigned long)gvt->opregion.opregion_va, - get_order(INTEL_GVT_OPREGION_SIZE)); - gvt->opregion.opregion_va = NULL; -} - -/** - * intel_gvt_init_opregion - initialize host opergion related stuffs - * @gvt: a GVT device - * - * Returns: - * Zero on success, negative error code if failed. - */ -int intel_gvt_init_opregion(struct intel_gvt *gvt) -{ - u8 *buf; - struct opregion_header *header; - struct vbt v; - - gvt_dbg_core("init host opregion\n"); - - gvt->opregion.opregion_va = (void *)__get_free_pages(GFP_KERNEL | - __GFP_ZERO, - get_order(INTEL_GVT_OPREGION_SIZE)); - - if (!gvt->opregion.opregion_va) { - gvt_err("fail to get memory for virt opregion\n"); - return -ENOMEM; - } - - /* emulated opregion with VBT mailbox only */ - buf = (u8 *)gvt->opregion.opregion_va; - header = (struct opregion_header *)buf; - memcpy(header->signature, OPREGION_SIGNATURE, - sizeof(OPREGION_SIGNATURE)); - header->size = 0x8; - header->opregion_ver = 0x02000000; - header->mboxes = MBOX_VBT; - - /* for unknown reason, the value in LID field is incorrect - * which block the windows guest, so workaround it by force - * setting it to "OPEN" - */ - buf[INTEL_GVT_OPREGION_CLID] = 0x3; - - /* emulated vbt from virt vbt generation */ - virt_vbt_generation(&v); - memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt)); - - return 0; -} - #define GVT_OPREGION_FUNC(scic) \ ({ \ u32 __ret; \ From f2880e04f3a5419366926182fc97a3c2e4fd8f2a Mon Sep 17 00:00:00 2001 From: fred gao Date: Tue, 14 Nov 2017 17:09:35 +0800 Subject: [PATCH 50/50] drm/i915/gvt: Move request alloc to dispatch_workload path only Previously the performance is improved through the workload auditing and shadowing ahead of vGPU scheduling, however, there is the case that more requests are allocated in submit_context before the previous request is added, the timeline will hold its seqno which is later. This patch is to move the request alloc to dispatch_workload function, where is the same place as request is added. It will fix the issue of kernel BUG for (timeline->seqno != request->fence.seqno) check when add_request. 
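The ordering problem described above can be modelled with a toy timeline; this is only a loose sketch of the invariant (a request must still hold the timeline's latest seqno when it is added), not the i915 request machinery:

#include <stdio.h>

struct timeline { unsigned int seqno; };

/* a request reserves the next seqno at allocation time */
static unsigned int request_alloc(struct timeline *tl)
{
	return ++tl->seqno;
}

static void request_add(struct timeline *tl, unsigned int seqno)
{
	if (seqno != tl->seqno)
		printf("BUG: adding seqno %u but timeline is already at %u\n",
		       seqno, tl->seqno);
	else
		printf("request %u added\n", seqno);
}

int main(void)
{
	struct timeline tl = { 0 };
	unsigned int early, other;

	/* allocate right before adding: the dispatch-time scheme, always consistent */
	request_add(&tl, request_alloc(&tl));

	/* allocate early, let another request in first: the old scheme's failure mode */
	early = request_alloc(&tl);
	other = request_alloc(&tl);
	request_add(&tl, other);
	request_add(&tl, early);
	return 0;
}

Keeping allocation next to submission, as the patch does, means nothing can slip in between the two steps.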
Fixes: 89ea20b930cb ("drm/i915/gvt: Factor out scan and shadow from workload dispatch") Signed-off-by: Chuanxiao Dong Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 31 ++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 9749113fccdd5..a742b364c2c3e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -270,7 +270,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ring_id = workload->ring_id; struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct drm_i915_gem_request *rq; struct intel_ring *ring; int ret; @@ -315,6 +314,27 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) ret = populate_shadow_context(workload); if (ret) goto err_unpin; + workload->shadowed = true; + return 0; + +err_unpin: + engine->context_unpin(engine, shadow_ctx); +err_shadow: + release_shadow_wa_ctx(&workload->wa_ctx); +err_scan: + return ret; +} + +static int intel_gvt_generate_request(struct intel_vgpu_workload *workload) +{ + int ring_id = workload->ring_id; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + struct drm_i915_gem_request *rq; + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + int ret; rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { @@ -329,14 +349,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) ret = copy_workload_to_ring_buffer(workload); if (ret) goto err_unpin; - workload->shadowed = true; return 0; err_unpin: engine->context_unpin(engine, shadow_ctx); -err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); -err_scan: return ret; } @@ -496,6 +513,12 @@ static int prepare_workload(struct intel_vgpu_workload *workload) goto err_unpin_mm; } + ret = intel_gvt_generate_request(workload); + if (ret) { + gvt_vgpu_err("fail to generate request\n"); + goto err_unpin_mm; + } + ret = prepare_shadow_batch_buffer(workload); if (ret) { gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");