From 3e70c5d6ea510e38f612d07fa0fd7487277b7087 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Mon, 26 Dec 2016 14:52:23 +0100 Subject: [PATCH 01/14] drm/i915/gvt: verify function types in new_mmio_info() The current prototype of new_mmio_info() uses void* for parameters read and write, which are functions with precise calling conventions (argument types and return type). Write down these conventions in the new_mmio_info() definition. This has been reported by the following warnings when clang is used to build the kernel: drivers/gpu/drm/i915/gvt/handlers.c:124:21: error: pointer type mismatch ('void *' and 'int (*)(struct intel_vgpu *, unsigned int, void *, unsigned int)') [-Werror,-Wpointer-type-mismatch] info->read = read ? read : intel_vgpu_default_mmio_read; ^ ~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/i915/gvt/handlers.c:125:23: error: pointer type mismatch ('void *' and 'int (*)(struct intel_vgpu *, unsigned int, void *, unsigned int)') [-Werror,-Wpointer-type-mismatch] info->write = write ? write : intel_vgpu_default_mmio_write; ^ ~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This allows the compiler to detect that sbi_ctl_mmio_write() returns a "bool" value instead of the expected "int" one. Fix this. Signed-off-by: Nicolas Iooss Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312c..052e57124c0a5 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, - void *read, void *write) + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) { struct intel_gvt_mmio_info *info, *p; u32 start, end, i; @@ -974,7 +975,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, +static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; From a12010534d0984f91bc5bdcf9e27bd55e20d82da Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Dec 2016 13:24:52 +0800 Subject: [PATCH 02/14] drm/i915/gvt: fix error handling of tlb_control emulation Return earlier for an invalid access, else it would falsely set the TLB flag for RCS.
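For illustration, a minimal standalone sketch (hypothetical names and illustrative register offsets, not the driver's code) of why the early return matters: with id defaulting to 0 (RCS) and only a break in the default case, an invalid offset would still fall through to set_bit() and falsely mark RCS as pending a TLB flush.

#include <linux/bitops.h>
#include <linux/errno.h>

enum sketch_engine { SKETCH_RCS, SKETCH_VCS, SKETCH_BCS, SKETCH_VECS };

static int sketch_tlb_control_handler(unsigned int offset, unsigned long *pending)
{
	unsigned int id = SKETCH_RCS;	/* engine 0, i.e. RCS, is the default */

	switch (offset) {
	case 0x4260: id = SKETCH_RCS;  break;
	case 0x4264: id = SKETCH_VCS;  break;
	case 0x426c: id = SKETCH_BCS;  break;
	case 0x4270: id = SKETCH_VECS; break;
	default:
		/* returning here avoids reaching set_bit() with id == RCS */
		return -EINVAL;
	}
	set_bit(id, pending);
	return 0;
}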
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 052e57124c0a5..e06d5f37bb92d 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1367,7 +1367,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int rc = 0; unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); @@ -1390,12 +1389,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, id = VECS; break; default: - rc = -EINVAL; - break; + return -EINVAL; } set_bit(id, (void *)vgpu->tlb_handle_pending); - return rc; + return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, From 39762ad437f1149b904e6baeaf28824da34a89c1 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Dec 2016 13:25:06 +0800 Subject: [PATCH 03/14] drm/i915/gvt: fix return value in mul_force_wake_write All mmio handlers should return a negative value for failure, not 1. Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index e06d5f37bb92d..8cbaf1c83720b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -220,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, default: /*should not hit here*/ gvt_err("invalid forcewake offset 0x%x\n", offset); - return 1; + return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; From 905a5035ebe79e89712cda0bed1887c73aa8e9bb Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Fri, 30 Dec 2016 14:10:53 +0800 Subject: [PATCH 04/14] drm/i915/gvt: always use readq and writeq The readq and writeq helpers are already offered by drm_os_linux.h, so we can use them directly without detecting their presence. This patch removes the duplicated code.
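For reference, a sketch of the fallback shape this patch deletes from the call sites; the commit relies on drm_os_linux.h already providing readq()/writeq(), so the composition below (a 64-bit PTE access built from two 32-bit MMIO accesses, mirroring the removed lines) does not need to be open-coded per call site.

#include <linux/io.h>
#include <linux/types.h>

/* 64-bit access composed from two 32-bit accesses: the shape of the
 * removed #ifdef fallback, shown only to document what the central
 * readq()/writeq() helpers stand in for. */
static u64 sketch_read_pte64(void __iomem *addr)
{
	u64 pte = ioread32(addr);

	pte |= (u64)ioread32(addr + 4) << 32;
	return pte;
}

static void sketch_write_pte64(u64 pte, void __iomem *addr)
{
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
}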
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5b2ce2a..a32e59de0eff9 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; - u64 pte; -#ifdef readq - pte = readq(addr); -#else - pte = ioread32(addr); - pte |= (u64)ioread32(addr + 4) << 32; -#endif - return pte; + return readq(addr); } static void write_pte64(struct drm_i915_private *dev_priv, @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; -#ifdef writeq writeq(pte, addr); -#else - iowrite32((u32)pte, addr); - iowrite32(pte >> 32, addr + 4); -#endif + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); } From 901a14b721feef1b37cfe6362ee103e135133677 Mon Sep 17 00:00:00 2001 From: Pei Zhang Date: Wed, 4 Jan 2017 22:32:23 +0800 Subject: [PATCH 05/14] drm/i915/gvt: print correct value for untracked mmio In function intel_vgpu_emulate_mmio_read, the untracked mmio register is dumped through kernel log, but the register value is not correct. This patch fixes this issue. V2: fix the fromat warning from checkpatch.pl. Signed-off-by: Pei Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a19462..e60701397ac21 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) goto err; - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); - if (!mmio && !vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); - - if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); - vgpu->mmio.disable_warn_untrack = true; - } - } - if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (WARN_ON(!IS_ALIGNED(offset, bytes))) goto err; } + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (mmio) { if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; } ret = mmio->read(vgpu, offset, p_data, bytes); - } else + } else { ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); + if (!vgpu->mmio.disable_warn_untrack) { + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", + vgpu->id, offset, bytes, *(u32 *)p_data); + + if (offset == 0x206c) { + gvt_err("------------------------------------------\n"); + gvt_err("vgpu%d: likely triggers a gfx reset\n", + vgpu->id); + gvt_err("------------------------------------------\n"); + vgpu->mmio.disable_warn_untrack = true; + } + } + } + if (ret) goto err; From 
888530b57f88f2bc856f181479df732c9622fa22 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 5 Jan 2017 10:26:13 +0800 Subject: [PATCH 06/14] drm/i915/gvt: adjust high memory size for default vGPU type The high memory size previously initialized for the default vGPU types was too small, which caused failures for some VMs. This tries to take a minimal value of 384MB for each VM and enlarges the default high memory size to make the guest driver happy. Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/vgpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d57773..398abb98dd0a8 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -177,7 +177,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) if (low_avail / min_low == 0) break; gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; gvt->types[i].max_instance = low_avail / min_low; gvt->types[i].avail_instance = gvt->types[i].max_instance; @@ -217,7 +217,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; From 2fcdb66364ee467d69228a3d2ea074498c177211 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 5 Jan 2017 10:26:24 +0800 Subject: [PATCH 07/14] drm/i915/gvt: remove duplicated definition Remove the duplicated definitions for resource sizes in aperture_gm.c, which are already defined in gvt.h. Only one definition is needed. Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/aperture_gm.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea63..65200313515cc 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -37,13 +37,6 @@ #include "i915_drv.h" #include "gvt.h" -#define MB_TO_BYTES(mb) ((mb) << 20ULL) -#define BYTES_TO_MB(b) ((b) >> 20ULL) - -#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) -#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) -#define HOST_FENCE 4 - static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; From 2e51ef32b0d66fcd5fe45c437cf7c6aef8350746 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 5 Jan 2017 13:28:05 +0800 Subject: [PATCH 08/14] drm/i915/gvt: fix use after free for workload In the function workload_thread(), we invoke complete_current_workload() to clean up the just-processed workload (the workload will be freed there), so we cannot access workload->req after that. This patch moves the complete_current_workload() call afterwards.
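The pattern being fixed, as a minimal self-contained sketch (hypothetical types and names, not the driver's structures): the completion path frees the workload, so the request reference must be dropped while the pointer is still valid.

#include <linux/slab.h>

struct sketch_req { int dummy; };
struct sketch_workload { struct sketch_req *req; };

/* stands in for complete_current_workload(): the workload is freed here */
static void sketch_complete(struct sketch_workload *w)
{
	kfree(w);
}

static void sketch_finish(struct sketch_workload *w)
{
	/* release w->req while w is still alive ... */
	kfree(w->req);
	w->req = NULL;

	/* ... and only then let the completion path free w; calling this
	 * first and touching w->req afterwards is the use-after-free. */
	sketch_complete(w);
}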
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db2422502352..c694dd039f3bb 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -459,11 +459,11 @@ static int workload_thread(void *priv) gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, workload->status); - complete_current_workload(gvt, ring_id); - if (workload->req) i915_gem_request_put(fetch_and_zero(&workload->req)); + complete_current_workload(gvt, ring_id); + if (need_force_wake) intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); From 440a9b9fae37dfd7e4c7d76db34fada57f9afd92 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 5 Jan 2017 16:49:03 +0800 Subject: [PATCH 09/14] drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done The vgpu->running_workload_num is used to determine whether a vgpu has any workload running or not. So we should make sure the workload is really done before we dec running_workload_num. The function complete_current_workload() is not the right place to do it, since it is still processing the workload. This patch moves the dec op afterwards. v2: move the dec op before wake_up(&scheduler->workload_complete_wq) (Min He) Signed-off-by: Changbin Du Reviewed-by: Min He Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index c694dd039f3bb..e91885dffeff8 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload; + struct intel_vgpu *vgpu; int event; mutex_lock(&gvt->lock); workload = scheduler->current_workload[ring_id]; + vgpu = workload->vgpu; - if (!workload->status && !workload->vgpu->resetting) { + if (!workload->status && !vgpu->resetting) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(workload->vgpu, - event); + intel_vgpu_trigger_virtual_event(vgpu, event); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; - atomic_dec(&workload->vgpu->running_workload_num); - list_del_init(&workload->list); workload->complete(workload); + atomic_dec(&vgpu->running_workload_num); wake_up(&scheduler->workload_complete_wq); mutex_unlock(&gvt->lock); } From 59c0573dfbd5f66e3aa54c2ce0bebcb0953d4db4 Mon Sep 17 00:00:00 2001 From: Jike Song Date: Fri, 6 Jan 2017 15:16:21 +0800 Subject: [PATCH 10/14] drm/i915/gvt: init/destroy vgpu_idr properly An idr should be initialized before use and destroyed afterwards.
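As background for this and the next patch, a minimal sketch of the expected idr lifecycle (generic names, not the gvt code): idr_init() or DEFINE_IDR() before any idr_alloc(), idr_remove() to undo a successful allocation on an error path, and idr_destroy() on teardown.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(sketch_idr);		/* or idr_init(&sketch_idr) at init time */

static int sketch_setup(void *obj)	/* placeholder for later init steps */
{
	return 0;
}

static int sketch_register(void *obj)
{
	int id, ret;

	id = idr_alloc(&sketch_idr, obj, 1, 0, GFP_KERNEL);	/* end == 0: no upper limit */
	if (id < 0)
		return id;

	ret = sketch_setup(obj);
	if (ret) {
		idr_remove(&sketch_idr, id);	/* undo the allocation on failure */
		return ret;
	}
	return id;
}

static void sketch_teardown(void)
{
	idr_destroy(&sketch_idr);		/* after all ids have been removed */
}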
Signed-off-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c3d2fd9..e6bf5c533fbe5 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); intel_gvt_clean_vgpu_types(gvt); + idr_destroy(&gvt->vgpu_idr); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) gvt_dbg_core("init gvt device\n"); + idr_init(&gvt->vgpu_idr); + mutex_init(&gvt->lock); gvt->dev_priv = dev_priv; @@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ret = intel_gvt_setup_mmio_info(gvt); if (ret) - return ret; + goto out_clean_idr; ret = intel_gvt_load_firmware(gvt); if (ret) @@ -313,6 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); kfree(gvt); return ret; } From 4e5378918b5b96e6b93fcadf1ab84a8486ca60a1 Mon Sep 17 00:00:00 2001 From: Jike Song Date: Fri, 6 Jan 2017 15:16:22 +0800 Subject: [PATCH 11/14] drm/i915/gvt: destroy the allocated idr entry on vgpu creation failures Once idr_alloc() has been called, data is allocated within the idr; if any error occurs afterwards, we should undo that with idr_remove() on the error path. Signed-off-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/vgpu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 398abb98dd0a8..f0e86123e45b7 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -304,7 +304,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, ret = setup_vgpu_mmio(vgpu); if (ret) - goto out_free_vgpu; + goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, param); if (ret) @@ -355,6 +355,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: clean_vgpu_mmio(vgpu); +out_clean_idr: + idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); mutex_unlock(&gvt->lock); From 03551e971f6e52c8dedd5741bf48631e65675759 Mon Sep 17 00:00:00 2001 From: Jike Song Date: Fri, 6 Jan 2017 15:16:23 +0800 Subject: [PATCH 12/14] drm/i915/gvt: cleanup opregion memory allocation code According to the spec, the ACPI OpRegion must be placed at a physical address below 4G. That is, for a vGPU it must be associated with a GPA below 4G, but on the host side it doesn't matter where the backing pages actually are. So when allocating pages on the host, the GFP_DMA32 flag is unnecessary. Also, the allocation is made from a sleepable context, so GFP_ATOMIC is unnecessary as well. This patch also removes INTEL_GVT_OPREGION_PORDER and uses get_order() instead.
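The allocation pattern the opregion code moves to, as a small sketch (generic names, sizes illustrative): derive the page order from the region size with get_order() instead of hard-coding it, and allocate with GFP_KERNEL since the caller may sleep and the host-side backing pages need no DMA32 placement.

#include <linux/gfp.h>
#include <linux/mm.h>

#define SKETCH_REGION_SIZE	(2 * PAGE_SIZE)	/* e.g. a two-page region */

static void *sketch_alloc_region(void)
{
	/* sleepable context: GFP_KERNEL, no GFP_ATOMIC or GFP_DMA32 needed */
	return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(SKETCH_REGION_SIZE));
}

static void sketch_free_region(void *va)
{
	free_pages((unsigned long)va, get_order(SKETCH_REGION_SIZE));
}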
Signed-off-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/opregion.c | 8 ++++---- drivers/gpu/drm/i915/gvt/reg.h | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921770c6d..d9fb41ab71198 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | - GFP_DMA32 | __GFP_ZERO, - INTEL_GVT_OPREGION_PORDER); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) return -ENOMEM; @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, - INTEL_GVT_OPREGION_PORDER); + get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02b..fbd023a16f181 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -50,8 +50,7 @@ #define INTEL_GVT_OPREGION_PARM 0x204 #define INTEL_GVT_OPREGION_PAGES 2 -#define INTEL_GVT_OPREGION_PORDER 1 -#define INTEL_GVT_OPREGION_SIZE (2 * 4096) +#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) From 5753394b64a07dd502cb288a5fd52e71fb01fc5d Mon Sep 17 00:00:00 2001 From: Jike Song Date: Fri, 6 Jan 2017 15:16:20 +0800 Subject: [PATCH 13/14] drm/i915/gvt/kvmgt: return meaningful error for vgpu creating failure The vgpu_create() routine we call returns meaningful errors to indicate failures, so we'd better pass them to our caller, the mdev framework, whereby sysfs is able to tell userspace what happened. Signed-off-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07ae4872..0c9234a87a20b 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) struct intel_vgpu_type *type; struct device *pdev; void *gvt; + int ret; pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; @@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) if (!type) { gvt_err("failed to find type %s to create\n", kobject_name(kobj)); - return -EINVAL; + ret = -EINVAL; + goto out; } vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { - gvt_err("create intel vgpu failed\n"); - return -EINVAL; + ret = vgpu == NULL ?
-EFAULT : PTR_ERR(vgpu); + gvt_err("failed to create intel vgpu: %d\n", ret); + goto out; } INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); @@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - return 0; + ret = 0; + +out: + return ret; } static int intel_vgpu_remove(struct mdev_device *mdev) From 9631739f8196ec80b5d9bf955f79b711490c0205 Mon Sep 17 00:00:00 2001 From: Jike Song Date: Mon, 9 Jan 2017 15:38:38 +0800 Subject: [PATCH 14/14] drm/i915/gvt: cleanup GFP flags In gvt, almost all memory allocations are in sleepable contexts. It's fault-prone to use GFP_ATOMIC everywhere. Replace it with GFP_KERNEL wherever possible. Signed-off-by: Jike Song Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 39 +++++++++++++--------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index a32e59de0eff9..3cf0df0bb3918 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1369,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) info->gtt_entry_size; mem = kzalloc(mm->has_shadow_page_table ? mm->page_table_entry_size * 2 - : mm->page_table_entry_size, - GFP_ATOMIC); + : mm->page_table_entry_size, GFP_KERNEL); if (!mem) return -ENOMEM; mm->virtual_page_table = mem; @@ -1521,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm; int ret; - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto fail; @@ -1875,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; - struct page *scratch_pt; + void *scratch_pt; unsigned long mfn; int i; - void *p; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - p = kmap_atomic(scratch_pt); - mfn = intel_gvt_hypervisor_virt_to_mfn(p); + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); - kunmap_atomic(p); - __free_page(scratch_pt); + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); + free_page((unsigned long)scratch_pt); return -EFAULT; } gtt->scratch_pt[type].page_mfn = mfn; - gtt->scratch_pt[type].page = scratch_pt; + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, mfn); @@ -1907,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 
*/ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { @@ -1925,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, se.val64 |= PPAT_CACHED_INDEX; for (i = 0; i < page_entry_num; i++) - ops->set_entry(p, &se, i, false, 0, vgpu); + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } - kunmap_atomic(p); - return 0; } @@ -2197,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; - void *page_addr; + void *page; gvt_dbg_core("init gtt\n"); @@ -2210,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENODEV; } - gvt->gtt.scratch_ggtt_page = - alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); - if (!gvt->gtt.scratch_ggtt_page) { + page = (void *)get_zeroed_page(GFP_KERNEL); + if (!page) { gvt_err("fail to allocate scratch ggtt page\n"); return -ENOMEM; } + gvt->gtt.scratch_ggtt_page = virt_to_page(page); - page_addr = page_address(gvt->gtt.scratch_ggtt_page); - - gvt->gtt.scratch_ggtt_mfn = - intel_gvt_hypervisor_virt_to_mfn(page_addr); + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { gvt_err("fail to translate scratch ggtt page\n"); __free_page(gvt->gtt.scratch_ggtt_page);