Merge tag 'gvt-next-2020-03-10' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2020-03-10

- Fix CFL dmabuf display after vfio edid enabling (Tina)
- Clean up scan non-priv batch debugfs entry (Chris)
- Use intel engines initialized in gvt, clean up previous ring id (Chris)
- Use intel_gt instead of dev_priv (Chris)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310081928.GG28483@zhen-hp.sh.intel.com
Rodrigo Vivi committed Mar 10, 2020
2 parents 765e7cd + a61ac1e commit 75e675f
Showing 25 changed files with 601 additions and 678 deletions.
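
The common thread in the diff below is the last bullet: GVT code stops holding a struct drm_i915_private (dev_priv) pointer and instead goes through the intel_gt it virtualizes. A simplified sketch of the pointer topology the new code relies on, with field names taken from the accessors used in the diff (the real kernel structs carry many more members):

/* Trimmed-down sketch only, not the full i915 definitions. */
struct intel_gt {
        struct drm_i915_private *i915;  /* owning device, for drm/pci bits */
        struct intel_uncore *uncore;    /* MMIO accessors; holds ->rpm */
        struct i915_ggtt *ggtt;         /* global GTT; holds ->vm.mutex */
};

struct intel_gvt {
        struct intel_gt *gt;            /* replaces the old ->dev_priv pointer */
        /* ... */
};

So gvt->dev_priv becomes gvt->gt->i915 where the device itself is needed, and gt->ggtt or gt->uncore where only the GT resources are.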
84 changes: 44 additions & 40 deletions drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         unsigned int flags;
         u64 start, end, size;
         struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
                 flags = PIN_MAPPABLE;
         }
 
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
-        mmio_hw_access_pre(dev_priv);
-        ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+        mutex_lock(&gt->ggtt->vm.mutex);
+        mmio_hw_access_pre(gt);
+        ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
                                   size, I915_GTT_PAGE_SIZE,
                                   I915_COLOR_UNEVICTABLE,
                                   start, end, flags);
-        mmio_hw_access_post(dev_priv);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mmio_hw_access_post(gt);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         if (ret)
                 gvt_err("fail to alloc %s gm space from host\n",
                         high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         int ret;
 
         ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 
         return 0;
 out_free_aperture:
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         return ret;
 }
 
 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        struct intel_gt *gt = gvt->gt;
 
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
         drm_mm_remove_node(&vgpu->gm.high_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
 }
 
 /**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                 u32 fence, u64 value)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct drm_i915_private *i915 = gvt->gt->i915;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
         i915_reg_t fence_reg_lo, fence_reg_hi;
 
-        assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+        assert_rpm_wakelock_held(uncore->rpm);
 
-        if (drm_WARN_ON(&dev_priv->drm, fence >= vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
                 return;
 
         reg = vgpu->fence.regs[fence];
-        if (drm_WARN_ON(&dev_priv->drm, !reg))
+        if (drm_WARN_ON(&i915->drm, !reg))
                 return;
 
         fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
         fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
 
-        I915_WRITE(fence_reg_lo, 0);
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_lo, 0);
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 
-        I915_WRITE(fence_reg_hi, upper_32_bits(value));
-        I915_WRITE(fence_reg_lo, lower_32_bits(value));
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+        intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 }
 
 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         u32 i;
 
-        if (drm_WARN_ON(&dev_priv->drm, !vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
                 return;
 
-        intel_runtime_pm_get(&dev_priv->runtime_pm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);
 
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
         _clear_vgpu_fence(vgpu);
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                 reg = vgpu->fence.regs[i];
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
 
-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         int i;
 
-        intel_runtime_pm_get(rpm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);
 
         /* Request fences from host */
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
 
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-                reg = i915_reserve_fence(&dev_priv->ggtt);
+                reg = i915_reserve_fence(gvt->gt->ggtt);
                 if (IS_ERR(reg))
                         goto out_free_fence;
 
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 
         _clear_vgpu_fence(vgpu);
 
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
         return 0;
+
 out_free_fence:
         gvt_vgpu_err("Failed to alloc fences\n");
         /* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put_unchecked(uncore->rpm);
         return -ENOSPC;
 }
 
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        intel_wakeref_t wakeref;
 
-        intel_runtime_pm_get(&dev_priv->runtime_pm);
-        _clear_vgpu_fence(vgpu);
-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+                _clear_vgpu_fence(vgpu);
 }
 
 /**
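
Two API conversions recur in the aperture_gm.c hunks above: the raw I915_WRITE()/POSTING_READ() macros become explicit intel_uncore accessors, and the untracked runtime-PM get/put pair becomes a wakeref-tracked one. A minimal sketch of the new pattern, using the same helpers as the diff (the surrounding function, error handling and GGTT locking are omitted):

/* Sketch: wakeref-tracked runtime PM around uncore MMIO accesses. */
intel_wakeref_t wakeref;

wakeref = intel_runtime_pm_get(uncore->rpm);            /* get returns a cookie */
intel_uncore_write(uncore, fence_reg_lo, 0);            /* was I915_WRITE() */
intel_uncore_posting_read(uncore, fence_reg_lo);        /* was POSTING_READ() */
intel_runtime_pm_put(uncore->rpm, wakeref);             /* cookie pairs the put */

/* Scoped form, as used by intel_vgpu_reset_resource() above: */
with_intel_runtime_pm(uncore->rpm, wakeref)
        _clear_vgpu_fence(vgpu);

Carrying the wakeref cookie lets the runtime-PM debug machinery catch unbalanced get/put pairs, which the older _unchecked variants could not.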
8 changes: 4 additions & 4 deletions drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,7 +106,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                 void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 
         if (drm_WARN_ON(&i915->drm, bytes > 4))
                 return -EINVAL;
@@ -300,7 +300,7 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                 void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         int ret;
 
         if (drm_WARN_ON(&i915->drm, bytes > 4))
@@ -396,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
         memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
 
         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 0);
         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 2);
 
         memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
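
The cfg_space.c changes are purely mechanical: the route to the host device becomes vgpu->gvt->gt->i915 rather than vgpu->gvt->dev_priv. As an illustration of what that chain is used for here, a hedged sketch of mirroring the host GPU's physical BAR sizes into the vGPU config space (the helper name is invented; the field accesses match the diff above):

/* Illustrative only: copy host BAR sizes into the vGPU PCI config space. */
static void sketch_mirror_host_bars(struct intel_vgpu *vgpu)
{
        struct pci_dev *pdev = vgpu->gvt->gt->i915->drm.pdev;

        /* BAR 0 is the GTT/MMIO window, BAR 2 is the aperture. */
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
                pci_resource_len(pdev, 0);
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
                pci_resource_len(pdev, 2);
}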