drm/i915/gvt: Wean gvt off using dev_priv
Teach gvt to use intel_gt directly as it currently assumes direct HW
access.

[Zhenyu: rebase, fix compiling]

Cc: Ding Zhuocheng <zhuocheng.ding@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200304032307.2983-3-zhenyuw@linux.intel.com
Chris Wilson authored and Zhenyu Wang committed Mar 6, 2020
1 parent 8fde410 commit a61ac1e
Showing 20 changed files with 239 additions and 238 deletions.
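Every hunk below applies the same substitution: a GVT call site that used to reach hardware state through dev_priv now goes through the intel_gt it is attached to, using gt->uncore, gt->ggtt and gt->i915 as needed. A condensed before/after sketch of the pattern (identifiers lifted from the aperture_gm.c hunks that follow; illustrative only, not a complete function):

    /* before: everything funnelled through dev_priv */
    struct drm_i915_private *dev_priv = gvt->dev_priv;

    mutex_lock(&dev_priv->ggtt.vm.mutex);
    mmio_hw_access_pre(dev_priv);
    I915_WRITE(fence_reg_lo, 0);

    /* after: use the intel_gt and its back-pointers */
    struct intel_gt *gt = gvt->gt;

    mutex_lock(&gt->ggtt->vm.mutex);
    mmio_hw_access_pre(gt);
    intel_uncore_write(gt->uncore, fence_reg_lo, 0);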
84 changes: 44 additions & 40 deletions drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         unsigned int flags;
         u64 start, end, size;
         struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
                 flags = PIN_MAPPABLE;
         }

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
-        mmio_hw_access_pre(dev_priv);
-        ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+        mutex_lock(&gt->ggtt->vm.mutex);
+        mmio_hw_access_pre(gt);
+        ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
                                   size, I915_GTT_PAGE_SIZE,
                                   I915_COLOR_UNEVICTABLE,
                                   start, end, flags);
-        mmio_hw_access_post(dev_priv);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mmio_hw_access_post(gt);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         if (ret)
                 gvt_err("fail to alloc %s gm space from host\n",
                         high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         int ret;

         ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)

         return 0;
 out_free_aperture:
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         return ret;
 }

 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        struct intel_gt *gt = gvt->gt;

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
         drm_mm_remove_node(&vgpu->gm.high_gm_node);
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
 }

 /**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                             u32 fence, u64 value)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct drm_i915_private *i915 = gvt->gt->i915;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
         i915_reg_t fence_reg_lo, fence_reg_hi;

-        assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+        assert_rpm_wakelock_held(uncore->rpm);

-        if (drm_WARN_ON(&dev_priv->drm, fence >= vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
                 return;

         reg = vgpu->fence.regs[fence];
-        if (drm_WARN_ON(&dev_priv->drm, !reg))
+        if (drm_WARN_ON(&i915->drm, !reg))
                 return;

         fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
         fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);

-        I915_WRITE(fence_reg_lo, 0);
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_lo, 0);
+        intel_uncore_posting_read(uncore, fence_reg_lo);

-        I915_WRITE(fence_reg_hi, upper_32_bits(value));
-        I915_WRITE(fence_reg_lo, lower_32_bits(value));
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+        intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 }

 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         u32 i;

-        if (drm_WARN_ON(&dev_priv->drm, !vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
                 return;

-        intel_runtime_pm_get(&dev_priv->runtime_pm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);

-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
         _clear_vgpu_fence(vgpu);
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                 reg = vgpu->fence.regs[i];
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);

-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
 }

 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct intel_uncore *uncore = gvt->gt->uncore;
         struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         int i;

-        intel_runtime_pm_get(rpm);
+        wakeref = intel_runtime_pm_get(uncore->rpm);

         /* Request fences from host */
-        mutex_lock(&dev_priv->ggtt.vm.mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);

         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-                reg = i915_reserve_fence(&dev_priv->ggtt);
+                reg = i915_reserve_fence(gvt->gt->ggtt);
                 if (IS_ERR(reg))
                         goto out_free_fence;

@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)

         _clear_vgpu_fence(vgpu);

-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
         return 0;
+
 out_free_fence:
         gvt_vgpu_err("Failed to alloc fences\n");
         /* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->ggtt.vm.mutex);
-        intel_runtime_pm_put_unchecked(rpm);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put_unchecked(uncore->rpm);
         return -ENOSPC;
 }

@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        intel_wakeref_t wakeref;

-        intel_runtime_pm_get(&dev_priv->runtime_pm);
-        _clear_vgpu_fence(vgpu);
-        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+        with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+                _clear_vgpu_fence(vgpu);
 }

 /**
8 changes: 4 additions & 4 deletions drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,7 +106,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&i915->drm, bytes > 4))
                 return -EINVAL;
@@ -300,7 +300,7 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                                  void *p_data, unsigned int bytes)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         int ret;

         if (drm_WARN_ON(&i915->drm, bytes > 4))
@@ -396,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
         memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 0);
         vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
-                pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+                pci_resource_len(gvt->gt->i915->drm.pdev, 2);

         memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
9 changes: 4 additions & 5 deletions drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
 static inline int mmio_diff_handler(struct intel_gvt *gvt,
                                     u32 offset, void *data)
 {
-        struct drm_i915_private *i915 = gvt->dev_priv;
         struct mmio_diff_param *param = data;
         struct diff_mmio *node;
         u32 preg, vreg;

-        preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+        preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
         vreg = vgpu_vreg(param->vgpu, offset);

         if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
         mutex_lock(&gvt->lock);
         spin_lock_bh(&gvt->scheduler.mmio_context_lock);

-        mmio_hw_access_pre(gvt->dev_priv);
+        mmio_hw_access_pre(gvt->gt);
         /* Recognize all the diff mmios to list. */
         intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
-        mmio_hw_access_post(gvt->dev_priv);
+        mmio_hw_access_post(gvt->gt);

         spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
         mutex_unlock(&gvt->lock);
@@ -186,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_debugfs_init(struct intel_gvt *gvt)
 {
-        struct drm_minor *minor = gvt->dev_priv->drm.primary;
+        struct drm_minor *minor = gvt->gt->i915->drm.primary;

         gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);

22 changes: 11 additions & 11 deletions drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)

 static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
                 return 0;
@@ -69,7 +69,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)

 int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (drm_WARN_ON(&dev_priv->drm,
                         pipe < PIPE_A || pipe >= I915_MAX_PIPES))
@@ -169,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {

 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         int pipe;

         if (IS_BROXTON(dev_priv)) {
@@ -320,7 +320,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                                     int type, unsigned int resolution)
 {
-        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);

         if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
@@ -391,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)

 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
         struct intel_vgpu_irq *irq = &vgpu->irq;
         int vblank_event[] = {
                 [PIPE_A] = PIPE_A_VBLANK,
@@ -423,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
         int pipe;

         mutex_lock(&vgpu->vgpu_lock);
-        for_each_pipe(vgpu->gvt->dev_priv, pipe)
+        for_each_pipe(vgpu->gvt->gt->i915, pipe)
                 emulate_vblank_on_pipe(vgpu, pipe);
         mutex_unlock(&vgpu->vgpu_lock);
 }
@@ -456,11 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
  */
 void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

         /* TODO: add more platforms support */
-        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-            IS_COFFEELAKE(dev_priv)) {
+        if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+            IS_COFFEELAKE(i915)) {
                 if (connected) {
                         vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                 SFUSE_STRAP_DDID_DETECTED;
@@ -486,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
  */
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
             IS_COFFEELAKE(dev_priv))
@@ -508,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

         intel_vgpu_init_i2c_edid(vgpu);

4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,

 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct vfio_device_gfx_plane_info *gfx_plane_info = args;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 /* To associate an exposed dmabuf with the dmabuf_obj */
 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct drm_i915_gem_object *obj;
         struct dma_buf *dmabuf;
