From d315d4faf82092df6fe82f456fd26dc8b247b627 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 19 Mar 2019 21:42:33 +0000
Subject: [PATCH 01/95] drm/i915/selftests: Provide stub reset functions

If a test fails, we quite often mark the device as wedged. Provide the
stub functions so that we can wedge the mock device, and avoid exploding
on test failures.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109981
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319214233.25498-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/mock_engine.c | 36 ++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 639d36eb904a8..61744819172ba 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -198,6 +198,37 @@ static void mock_submit_request(struct i915_request *request)
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+{
+	GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_cancel_requests(struct intel_engine_cs *engine)
+{
+	struct i915_request *request;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+
+	/* Mark all submitted requests as skipped. */
+	list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+		if (!i915_request_signaled(request))
+			dma_fence_set_error(&request->fence, -EIO);
+
+		i915_request_mark_complete(request);
+	}
+
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 				    const char *name,
 				    int id)
@@ -223,6 +254,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
 	engine->base.submit_request = mock_submit_request;
 
+	engine->base.reset.prepare = mock_reset_prepare;
+	engine->base.reset.reset = mock_reset;
+	engine->base.reset.finish = mock_reset_finish;
+	engine->base.cancel_requests = mock_cancel_requests;
+
 	if (i915_timeline_init(i915,
 			       &engine->base.timeline,
 			       engine->base.name,

From fd79d93985e08f29535c07337772a7da3159bda5 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 20 Mar 2019 08:00:52 +0000
Subject: [PATCH 02/95] drm/i915/selftests: add test to verify get/put fw
 domains

Exercise acquiring and releasing forcewake around register reads. In
order to read a register behind a GT powerwell, we need to instruct that
powerwell to wake up using a forcewake. When we no longer require the GT
powerwell, we tell the GT to release our forcewake. Inside the
forcewake, the register read should work but outside it should just
return garbage, 0 being the most common garbage. Thus we can detect when
we are inside and outside of the forcewake with just a simple register
read, and so can verify that the GT powerwell is released when we say
so.

v2: Picking the right forcewaked register to return 0 outside of
forcewake is an art.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190320080052.27273-1-chris@chris-wilson.co.uk
---
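A distilled sketch of the check the new subtest performs, using only
helpers and locals that appear in the diff below:

    intel_uncore_forcewake_get(i915, fw_domains);
    val = readl(reg);       /* powerwell held awake: expect a real value */
    intel_uncore_forcewake_put(i915, fw_domains);

    /* once the delayed release has flushed, the read should decay to 0 */
    if (wait_for(readl(reg) == 0, 100))
        err = -ETIMEDOUT;
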
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 134 +++++++++++++++++-
 1 file changed, 128 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 81d9d31042a9c..099486235936b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -119,9 +119,130 @@ int intel_uncore_mock_selftests(void)
 	return 0;
 }
 
-static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+static int live_forcewake_ops(void *arg)
+{
+	static const struct reg {
+		const char *name;
+		unsigned long platforms;
+		unsigned int offset;
+	} registers[] = {
+		{
+			"RING_START",
+			INTEL_GEN_MASK(6, 7),
+			0x38,
+		},
+		{
+			"RING_MI_MODE",
+			INTEL_GEN_MASK(8, BITS_PER_LONG),
+			0x9c,
+		}
+	};
+	const struct reg *r;
+	struct drm_i915_private *i915 = arg;
+	struct intel_uncore_forcewake_domain *domain;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	unsigned int tmp;
+	int err = 0;
+
+	GEM_BUG_ON(i915->gt.awake);
+
+	/* vlv/chv with their pcu behave differently wrt reads */
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		pr_debug("PCU fakes forcewake badly; skipping\n");
+		return 0;
+	}
+
+	/* We have to pick carefully to get the exact behaviour we need */
+	for (r = registers; r->name; r++)
+		if (r->platforms & INTEL_INFO(i915)->gen_mask)
+			break;
+	if (!r->name) {
+		pr_debug("Forcewaked register not known for %s; skipping\n",
+			 intel_platform_name(INTEL_INFO(i915)->platform));
+		return 0;
+	}
+
+	wakeref = intel_runtime_pm_get(i915);
+
+	for_each_fw_domain(domain, i915, tmp) {
+		smp_store_mb(domain->active, false);
+		if (!hrtimer_cancel(&domain->timer))
+			continue;
+
+		intel_uncore_fw_release_timer(&domain->timer);
+	}
+
+	for_each_engine(engine, i915, id) {
+		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+		u32 __iomem *reg = i915->regs + engine->mmio_base + r->offset;
+		enum forcewake_domains fw_domains;
+		u32 val;
+
+		if (!engine->default_state)
+			continue;
+
+		fw_domains = intel_uncore_forcewake_for_reg(i915, mmio,
+							    FW_REG_READ);
+		if (!fw_domains)
+			continue;
+
+		for_each_fw_domain_masked(domain, fw_domains, i915, tmp) {
+			if (!domain->wake_count)
+				continue;
+
+			pr_err("fw_domain %s still active, aborting test!\n",
+			       intel_uncore_forcewake_domain_to_str(domain->id));
+			err = -EINVAL;
+			goto out_rpm;
+		}
+
+		intel_uncore_forcewake_get(i915, fw_domains);
+		val = readl(reg);
+		intel_uncore_forcewake_put(i915, fw_domains);
+
+		/* Flush the forcewake release (delayed onto a timer) */
+		for_each_fw_domain_masked(domain, fw_domains, i915, tmp) {
+			smp_store_mb(domain->active, false);
+			if (hrtimer_cancel(&domain->timer))
+				intel_uncore_fw_release_timer(&domain->timer);
+
+			preempt_disable();
+			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+			preempt_enable();
+			if (err) {
+				pr_err("Failed to clear fw_domain %s\n",
+				       intel_uncore_forcewake_domain_to_str(domain->id));
+				goto out_rpm;
+			}
+		}
+
+		if (!val) {
+			pr_err("%s:%s was zero while fw was held!\n",
+			       engine->name, r->name);
+			err = -EINVAL;
+			goto out_rpm;
+		}
+
+		/* We then expect the read to return 0 outside of the fw */
+		if (wait_for(readl(reg) == 0, 100)) {
+			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+			       engine->name, r->name, readl(reg), fw_domains);
+			err = -ETIMEDOUT;
+			goto out_rpm;
+		}
+	}
+
+out_rpm:
+	intel_runtime_pm_put(i915, wakeref);
+	return err;
+}
+
+static int live_forcewake_domains(void *arg)
 {
 #define FW_RANGE 0x40000
+	struct drm_i915_private *dev_priv = arg;
 	unsigned long *valid;
 	u32 offset;
 	int err;
@@ -179,6 +300,11 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
 
 int intel_uncore_live_selftests(struct drm_i915_private *i915)
 {
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_forcewake_ops),
+		SUBTEST(live_forcewake_domains),
+	};
+
 	int err;
 
 	/* Confirm the table we load is still valid */
@@ -188,9 +314,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
 	if (err)
 		return err;
 
-	err = intel_uncore_check_forcewake_domains(i915);
-	if (err)
-		return err;
-
-	return 0;
+	return i915_subtests(tests, i915);
 }

From 5c227869839b15a066c4808d3a88d36b79b35cd8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 18 Mar 2019 18:56:28 +0200
Subject: [PATCH 03/95] drm/i915: Reorder gen3/4 swizzle detection logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

g33/i965g/g45 are the exceptional cases when it comes to
the swizzle detection. Let's reorder the code to handle
them first and let everything else be handled by the
else branch. This allows us to unset .is_mobile for the
desktop PNV variant (which supposedly must follow the
"mobile" path here).

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318165633.28924-1-ville.syrjala@linux.intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 65 +++++++++++------------
 1 file changed, 32 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 65624b8e4d159..9418ad499b7e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -585,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev_priv) ||
-		   IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
+	} else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
+		/* The 965, G33, and newer, have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
+		 */
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	} else {
 		u32 dcc;
 
 		/* On 9xx chipsets, channel interleave by the CPU is
@@ -636,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 		}
-	} else {
-		/* The 965, G33, and newer, have a very flexible memory
-		 * configuration.  It will enable dual-channel mode
-		 * (interleaving) on as much memory as it can, and the GPU
-		 * will additionally sometimes enable different bit 6
-		 * swizzling for tiled objects from the CPU.
-		 *
-		 * Here's what I found on the G965:
-		 *    slot fill         memory size  swizzling
-		 * 0A   0B   1A   1B    1-ch   2-ch
-		 * 512  0    0    0     512    0     O
-		 * 512  0    512  0     16     1008  X
-		 * 512  0    0    512   16     1008  X
-		 * 0    512  0    512   16     1008  X
-		 * 1024 1024 1024 0     2048   1024  O
-		 *
-		 * We could probably detect this based on either the DRB
-		 * matching, which was the case for the swizzling required in
-		 * the table above, or from the 1-ch value being less than
-		 * the minimum size of a rank.
-		 *
-		 * Reports indicate that the swizzling actually
-		 * varies depending upon page placement inside the
-		 * channels, i.e. we see swizzled pages where the
-		 * banks of memory are paired and unswizzled on the
-		 * uneven portion, so leave that as unknown.
-		 */
-		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		}
 	}
 
 	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||

From b7c8093fac15b253cab8c1060d0d5543a8739ed0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 19 Mar 2019 16:23:29 +0200
Subject: [PATCH 04/95] drm/i915: Introduce i9xx_has_pfit()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make the code self-documenting by introducing i9xx_has_pfit().
Also make PNV an exceptional case so that we can unset
.is_mobile for the desktop variant.

v2: s/gen4/gen>=4/ (Tvrtko)

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319142329.22881-1-ville.syrjala@linux.intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index eadc639e99acd..713d2651219ee 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7962,14 +7962,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
 	return 0;
 }
 
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+	if (IS_I830(dev_priv))
+		return false;
+
+	return INTEL_GEN(dev_priv) >= 4 ||
+		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 				 struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 tmp;
 
-	if (INTEL_GEN(dev_priv) <= 3 &&
-	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
+	if (!i9xx_has_pfit(dev_priv))
 		return;
 
 	tmp = I915_READ(PFIT_CONTROL);

From 9e7d569924d9b9b17734789496ac78d4f7eaab7c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 18 Mar 2019 18:56:30 +0200
Subject: [PATCH 05/95] drm/i915: Introduce i9xx_has_pps()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a small helper to determine if we have the panel power
sequencer or not. We'll make PNV an exceptional case so
that we can unset .is_mobile for the desktop variant.

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318165633.28924-3-ville.syrjala@linux.intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 713d2651219ee..01cdd6e745c3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1442,6 +1442,14 @@ static void chv_enable_pll(struct intel_crtc *crtc,
 	}
 }
 
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+{
+	if (IS_I830(dev_priv))
+		return false;
+
+	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
 static void i9xx_enable_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *crtc_state)
 {
@@ -1453,7 +1461,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
 	/* PLL is protected by panel, make sure we can write it */
-	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
+	if (i9xx_has_pps(dev_priv))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
 
 	/*

From f3e304852242f5a5da6ab47798f4aca044786885 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 18 Mar 2019 18:56:31 +0200
Subject: [PATCH 06/95] drm/i915: Introduce i915_has_asle()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to allow the desktop PNV to not have .is_mobile set. To
that end, let's add a small helper to determine if the platform
has the ASLE interrupt (or equivalent). Supposedly both PNV
variants have it.

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318165633.28924-4-ville.syrjala@linux.intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 82d487189a340..1375bba455487 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -748,13 +748,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
 	POSTING_READ(reg);
 }
 
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->opregion.asle)
+		return false;
+
+	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
 /**
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  * @dev_priv: i915 device private
  */
 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 {
-	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
+	if (!i915_has_asle(dev_priv))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);

From 4677faf65fd142d97d492133b8483d020f2ddecc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 18 Mar 2019 18:56:32 +0200
Subject: [PATCH 07/95] drm/i915: Use HPLLVCO_MOBILE for all PNVs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To allow unsetting .is_mobile for the desktop variant of PNV,
fix up the cdclk code to select the mobile HPLLVCO register
for both PNV variants.

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190318165633.28924-5-ville.syrjala@linux.intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_cdclk.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 21fb4e0d6c4e6..d27ccd23d753a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -234,7 +234,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
 	else
 		return 0;
 
-	tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+	tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+			HPLLVCO_MOBILE : HPLLVCO);
 
 	vco = vco_table[tmp & 0x7];
 	if (vco == 0)

From 3cf963cfe3a9fc83bc235b1892858164b1b83306 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:36 +0200
Subject: [PATCH 08/95] drm/i915: Accept alloc_size == blocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the minimum required ddb space for all the planes equals the
total ddb space available, we are allowed to use the relevant
watermark level.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-2-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
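In other words, with blocks == alloc_size the level still fits: the
leftover space distributed further down is simply alloc_size - blocks = 0,
and only blocks > alloc_size has to reject the level.
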
 drivers/gpu/drm/i915/intel_pm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d73b13ca57a03..07351ceb567bb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4378,7 +4378,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 			blocks += wm->uv_wm[level].min_ddb_alloc;
 		}
 
-		if (blocks < alloc_size) {
+		if (blocks <= alloc_size) {
 			alloc_size -= blocks;
 			break;
 		}

From 67155a6996841b5167c441bff2724ca86fdd0a2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:37 +0200
Subject: [PATCH 09/95] drm/i915: Don't pass plane state to
 skl_compute_plane_wm()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

skl_compute_plane_wm() doesn't actually need the plane state. While
it would make logical sense to pass it, we shall need to reuse
skl_compute_plane_wm() to compute the minimum ddb allocation for
the cursor before the cursor may be enabled. Thus we can't rely
on the plane state. The alternative would be to duplicate a lot of
the wm calculations for the cursor ddb allocation case, which doesn't
appeal to me.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-3-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 07351ceb567bb..971f8ab127be0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4695,14 +4695,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
 }
 
 static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
-				 const struct intel_plane_state *intel_pstate,
 				 int level,
 				 const struct skl_wm_params *wp,
 				 const struct skl_wm_level *result_prev,
 				 struct skl_wm_level *result /* out */)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(intel_pstate->base.plane->dev);
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
 	u32 latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
 	uint_fixed_16_16_t selected_result;
@@ -4821,19 +4819,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 
 static void
 skl_compute_wm_levels(const struct intel_crtc_state *cstate,
-		      const struct intel_plane_state *intel_pstate,
 		      const struct skl_wm_params *wm_params,
 		      struct skl_wm_level *levels)
 {
-	struct drm_i915_private *dev_priv =
-		to_i915(intel_pstate->base.plane->dev);
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level *result_prev = &levels[0];
 
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = &levels[level];
 
-		skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+		skl_compute_plane_wm(cstate, level, wm_params,
 				     result_prev, result);
 
 		result_prev = result;
@@ -4930,7 +4926,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
 	skl_compute_transition_wm(crtc_state, &wm_params, wm);
 
 	return 0;
@@ -4952,7 +4948,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
+	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
 
 	return 0;
 }

From c92558aa41722f95160d96b8e1ac8dd36731a836 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:38 +0200
Subject: [PATCH 10/95] drm/i915: Extract skl_compute_wm_params()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Extract the meat of skl_compute_plane_wm_params() into a lower
level helper that doesn't depend on the plane state. We'll
reuse this for the cursor ddb allocation calculations.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-4-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 84 ++++++++++++++++++++-------------
 1 file changed, 50 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 971f8ab127be0..9e7b4412f7a81 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4584,57 +4584,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 }
 
 static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
-			    const struct intel_plane_state *intel_pstate,
-			    struct skl_wm_params *wp, int color_plane)
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+		      int width, const struct drm_format_info *format,
+		      u64 modifier, unsigned int rotation,
+		      u32 plane_pixel_rate, struct skl_wm_params *wp,
+		      int color_plane)
 {
-	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_plane_state *pstate = &intel_pstate->base;
-	const struct drm_framebuffer *fb = pstate->fb;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 interm_pbpl;
 
 	/* only planar format has two planes */
-	if (color_plane == 1 && !is_planar_yuv_format(fb->format->format)) {
+	if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
 		DRM_DEBUG_KMS("Non planar format have single plane\n");
 		return -EINVAL;
 	}
 
-	wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
-		      fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
-		      fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-		      fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-	wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
-	wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-			 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-	wp->is_planar = is_planar_yuv_format(fb->format->format);
-
-	if (plane->id == PLANE_CURSOR) {
-		wp->width = intel_pstate->base.crtc_w;
-	} else {
-		/*
-		 * Src coordinates are already rotated by 270 degrees for
-		 * the 90/270 degree plane rotation cases (to match the
-		 * GTT mapping), hence no need to account for rotation here.
-		 */
-		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
-	}
+	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+		      modifier == I915_FORMAT_MOD_Yf_TILED ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+	wp->is_planar = is_planar_yuv_format(format->format);
 
+	wp->width = width;
 	if (color_plane == 1 && wp->is_planar)
 		wp->width /= 2;
 
-	wp->cpp = fb->format->cpp[color_plane];
-	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
-							     intel_pstate);
+	wp->cpp = format->cpp[color_plane];
+	wp->plane_pixel_rate = plane_pixel_rate;
 
 	if (INTEL_GEN(dev_priv) >= 11 &&
-	    fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
 		wp->dbuf_block_size = 256;
 	else
 		wp->dbuf_block_size = 512;
 
-	if (drm_rotation_90_or_270(pstate->rotation)) {
-
+	if (drm_rotation_90_or_270(rotation)) {
 		switch (wp->cpp) {
 		case 1:
 			wp->y_min_scanlines = 16;
@@ -4679,12 +4667,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
 
 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
 					     wp->plane_blocks_per_line);
+
 	wp->linetime_us = fixed16_to_u32_round_up(
-					intel_get_linetime_us(cstate));
+					intel_get_linetime_us(crtc_state));
 
 	return 0;
 }
 
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+			    const struct intel_plane_state *plane_state,
+			    struct skl_wm_params *wp, int color_plane)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	int width;
+
+	if (plane->id == PLANE_CURSOR) {
+		width = plane_state->base.crtc_w;
+	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
+		width = drm_rect_width(&plane_state->base.src) >> 16;
+	}
+
+	return skl_compute_wm_params(crtc_state, width,
+				     fb->format, fb->modifier,
+				     plane_state->base.rotation,
+				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
+				     wp, color_plane);
+}
+
 static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
 {
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))

From df331de3f8aac1a8080510d883fa40555809cddf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 19 Mar 2019 18:03:11 +0200
Subject: [PATCH 11/95] drm/i915: Allocate enough DDB for the cursor
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently we just assume that 32 or 8 blocks of ddb is sufficient
for the cursor. The 32 might be, but the 8 is certainly not. The
minimum we need is at least what level 0 watermarks need, but that
is a bit restrictive, so instead let's calculate what level 7
would need for a 256x256 cursor. We'll use that to determine the
fixed ddb allocation for the cursor. This way the cursor will never
be responsible for missing out on deeper power saving states.

v2: Loop to make sure this works even if some wm levels are
    totally disabled (latency==0)

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20190319160311.23529-1-ville.syrjala@linux.intel.com
---
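That is, min_ddb_alloc below ends up as the requirement of the deepest
watermark level that is still enabled for a 256x256 ARGB cursor, and the
result is clamped to no less than the old fixed allocation:

    return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
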
 drivers/gpu/drm/i915/intel_pm.c | 41 +++++++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9e7b4412f7a81..b35a55a59680f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3924,12 +3924,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
 }
 
-static unsigned int skl_cursor_allocation(int num_active)
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+				 int width, const struct drm_format_info *format,
+				 u64 modifier, unsigned int rotation,
+				 u32 plane_pixel_rate, struct skl_wm_params *wp,
+				 int color_plane);
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+				 int level,
+				 const struct skl_wm_params *wp,
+				 const struct skl_wm_level *result_prev,
+				 struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+		      int num_active)
 {
-	if (num_active == 1)
-		return 32;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	int level, max_level = ilk_wm_max_level(dev_priv);
+	struct skl_wm_level wm = {};
+	int ret, min_ddb_alloc = 0;
+	struct skl_wm_params wp;
+
+	ret = skl_compute_wm_params(crtc_state, 256,
+				    drm_format_info(DRM_FORMAT_ARGB8888),
+				    DRM_FORMAT_MOD_LINEAR,
+				    DRM_MODE_ROTATE_0,
+				    crtc_state->pixel_rate, &wp, 0);
+	WARN_ON(ret);
+
+	for (level = 0; level <= max_level; level++) {
+		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+		if (wm.min_ddb_alloc == U16_MAX)
+			break;
+
+		min_ddb_alloc = wm.min_ddb_alloc;
+	}
 
-	return 8;
+	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
 }
 
 static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
@@ -4354,7 +4385,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 
 	/* Allocate fixed number of blocks for cursor. */
-	total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+	total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
 	alloc_size -= total[PLANE_CURSOR];
 	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
 		alloc->end - total[PLANE_CURSOR];

From 10a7e07b68b9ecf9251f78043fb7b342bf9b04e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:40 +0200
Subject: [PATCH 12/95] drm/i915: Make sure cursor has enough ddb for the
 selected wm level
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We use a fixed ddb allocation for the cursor. Now the calculation
actually makes sure we have enough ddb space, but let's double check
anyway.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-6-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b35a55a59680f..d54b70db3f208 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4401,10 +4401,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
 		blocks = 0;
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-			if (plane_id == PLANE_CURSOR)
+			wm = &cstate->wm.skl.optimal.planes[plane_id];
+
+			if (plane_id == PLANE_CURSOR) {
+				if (WARN_ON(wm->wm[level].min_ddb_alloc >
+					    total[PLANE_CURSOR])) {
+					blocks = U32_MAX;
+					break;
+				}
 				continue;
+			}
 
-			wm = &cstate->wm.skl.optimal.planes[plane_id];
 			blocks += wm->wm[level].min_ddb_alloc;
 			blocks += wm->uv_wm[level].min_ddb_alloc;
 		}

From a301cb0fca2d4c5a615d5dac1a3d80baa99518ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:41 +0200
Subject: [PATCH 13/95] drm/i915: Keep plane watermarks enabled more
 aggressively
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently we disable all the watermarks above the selected max
level for every plane. That would mean that the cursor's watermarks
may also get modified when another plane causes the selected
max watermark level to change. That is not so great, as we would
like to keep the cursor as independent as possible to avoid
having to throttle it in response to other plane activity.

To avoid that, let's keep the watermarks enabled even for levels
above the max selected watermark level, iff the plane has enough
ddb for that particular level. This way the cursor's enabled
watermarks only depend on the cursor itself. This is safe because
the hardware will never choose to use a watermark level unless
all enabled planes have also enabled that level.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-7-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d54b70db3f208..e7bc38fc4f9d9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4508,7 +4508,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 			wm = &cstate->wm.skl.optimal.planes[plane_id];
-			memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+
+			/*
+			 * We only disable the watermarks for each plane if
+			 * they exceed the ddb allocation of said plane. This
+			 * is done so that we don't end up touching cursor
+			 * watermarks needlessly when some other plane reduces
+			 * our max possible watermark level.
+			 *
+			 * Bspec has this to say about the PLANE_WM enable bit:
+			 * "All the watermarks at this level for all enabled
+			 *  planes must be enabled before the level will be used."
+			 * So this is actually safe to do.
+			 */
+			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
+			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
+				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
 
 			/*
 			 * Wa_1408961008:icl

From 5e6037c88ad5f052e317a56cff54df40b7e82e5f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:42 +0200
Subject: [PATCH 14/95] drm/i915: Move some variables to tighter scope
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Clean up skl_allocate_pipe_ddb() a bit by moving the 'wm' variable
to tighter scope. We'll also constify it where appropriate.

Also initialize plane_alloc/uv_plane_alloc when declaring them
rather than later.

v2: Update commit message (Matt)

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-8-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e7bc38fc4f9d9..0b3e29eb0701e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4344,7 +4344,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
-	struct skl_plane_wm *wm;
 	u16 alloc_size, start = 0;
 	u16 total[I915_MAX_PLANES] = {};
 	u16 uv_total[I915_MAX_PLANES] = {};
@@ -4401,7 +4400,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
 		blocks = 0;
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-			wm = &cstate->wm.skl.optimal.planes[plane_id];
+			const struct skl_plane_wm *wm =
+				&cstate->wm.skl.optimal.planes[plane_id];
 
 			if (plane_id == PLANE_CURSOR) {
 				if (WARN_ON(wm->wm[level].min_ddb_alloc >
@@ -4435,6 +4435,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 * proportional to its relative data rate.
 	 */
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+		const struct skl_plane_wm *wm =
+			&cstate->wm.skl.optimal.planes[plane_id];
 		u64 rate;
 		u16 extra;
 
@@ -4448,8 +4450,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		if (total_data_rate == 0)
 			break;
 
-		wm = &cstate->wm.skl.optimal.planes[plane_id];
-
 		rate = plane_data_rate[plane_id];
 		extra = min_t(u16, alloc_size,
 			      DIV64_U64_ROUND_UP(alloc_size * rate,
@@ -4474,14 +4474,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	/* Set the actual DDB start/end points for each plane */
 	start = alloc->start;
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+		struct skl_ddb_entry *plane_alloc =
+			&cstate->wm.skl.plane_ddb_y[plane_id];
+		struct skl_ddb_entry *uv_plane_alloc =
+			&cstate->wm.skl.plane_ddb_uv[plane_id];
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
 
-		plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
-		uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
-
 		/* Gen11+ uses a separate plane for UV watermarks */
 		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
 
@@ -4507,7 +4507,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 */
 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-			wm = &cstate->wm.skl.optimal.planes[plane_id];
+			struct skl_plane_wm *wm =
+				&cstate->wm.skl.optimal.planes[plane_id];
 
 			/*
 			 * We only disable the watermarks for each plane if
@@ -4543,7 +4544,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 * don't have enough DDB blocks for it.
 	 */
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		wm = &cstate->wm.skl.optimal.planes[plane_id];
+		struct skl_plane_wm *wm =
+			&cstate->wm.skl.optimal.planes[plane_id];
+
 		if (wm->trans_wm.plane_res_b >= total[plane_id])
 			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
 	}

From 96cb7cde1a3009663918b69ab5f8000c67f0a7b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:43 +0200
Subject: [PATCH 15/95] drm/i915: Don't pass pipe_wm around so much
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

{skl,icl}_build_plane_wm() don't need to be passed the pipe_wm, so
don't. And skl_build_pipe_wm() can easily dig it out itself.

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-9-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0b3e29eb0701e..fca5cdd599e10 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5025,8 +5025,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
-static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
-			      struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@@ -5052,8 +5051,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
 	return 0;
 }
 
-static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
-			      struct intel_crtc_state *crtc_state,
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
 	enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
@@ -5090,10 +5088,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
 	return 0;
 }
 
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
-			     struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
 {
 	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
 	struct drm_crtc_state *crtc_state = &cstate->base;
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
@@ -5110,11 +5108,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 						to_intel_plane_state(pstate);
 
 		if (INTEL_GEN(dev_priv) >= 11)
-			ret = icl_build_plane_wm(pipe_wm,
-						 cstate, intel_pstate);
+			ret = icl_build_plane_wm(cstate, intel_pstate);
 		else
-			ret = skl_build_plane_wm(pipe_wm,
-						 cstate, intel_pstate);
+			ret = skl_build_plane_wm(cstate, intel_pstate);
 		if (ret)
 			return ret;
 	}
@@ -5277,7 +5273,7 @@ static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
 	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
 	int ret;
 
-	ret = skl_build_pipe_wm(cstate, pipe_wm);
+	ret = skl_build_pipe_wm(cstate);
 	if (ret)
 		return ret;
 

From 8cac9fd9bc7967cd114c543bdf70a5d0aa759498 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 12 Mar 2019 22:58:44 +0200
Subject: [PATCH 16/95] drm/i915: Inline skl_update_pipe_wm() into its only
 caller
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

skl_update_pipe_wm() is quite pointless now. Just inline it into
skl_compute_wm().

v2: s/skl_build_pipe_wm/skl_update_pipe_wm/ in the commit message (Matt)

Cc: Neel Desai <neel.desai@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205844.6339-10-ville.syrjala@linux.intel.com
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 32 ++++++--------------------------
 1 file changed, 6 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fca5cdd599e10..53e9304c66ab8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5265,23 +5265,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 	return false;
 }
 
-static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
-			      const struct skl_pipe_wm *old_pipe_wm,
-			      struct skl_pipe_wm *pipe_wm, /* out */
-			      bool *changed /* out */)
-{
-	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
-	int ret;
-
-	ret = skl_build_pipe_wm(cstate);
-	if (ret)
-		return ret;
-
-	*changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
-
-	return 0;
-}
-
 static u32
 pipes_modified(struct intel_atomic_state *state)
 {
@@ -5620,10 +5603,9 @@ static int
 skl_compute_wm(struct intel_atomic_state *state)
 {
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
+	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc_state *old_crtc_state;
 	struct skl_ddb_values *results = &state->wm_results;
-	struct skl_pipe_wm *pipe_wm;
 	bool changed = false;
 	int ret, i;
 
@@ -5641,12 +5623,8 @@ skl_compute_wm(struct intel_atomic_state *state)
 	 * pipe allocations had to change.
 	 */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    cstate, i) {
-		const struct skl_pipe_wm *old_pipe_wm =
-			&old_crtc_state->wm.skl.optimal;
-
-		pipe_wm = &cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+					    new_crtc_state, i) {
+		ret = skl_build_pipe_wm(new_crtc_state);
 		if (ret)
 			return ret;
 
@@ -5654,7 +5632,9 @@ skl_compute_wm(struct intel_atomic_state *state)
 		if (ret)
 			return ret;
 
-		if (changed)
+		if (!skl_pipe_wm_equals(crtc,
+					&old_crtc_state->wm.skl.optimal,
+					&new_crtc_state->wm.skl.optimal))
 			results->dirty_pipes |= drm_crtc_mask(&crtc->base);
 	}
 

From 6e514e371757a62996881e94dd2a488b3e16d518 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Mon, 4 Mar 2019 11:29:08 +0200
Subject: [PATCH 17/95] drm/i915: Switch to bitmap_zalloc()

Switch to bitmap_zalloc() to show clearly what we are allocating.
Besides that, it returns a pointer of bitmap type instead of an
opaque void *.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190304092908.57382-2-andriy.shevchenko@linux.intel.com
---
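The conversion pattern, as in the hunks below:

    /* before */
    obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), sizeof(long), GFP_KERNEL);
    ...
    kfree(obj->bit_17);

    /* after */
    obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
    ...
    bitmap_free(obj->bit_17);
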
 drivers/gpu/drm/i915/i915_gem.c               | 2 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c     | 3 +--
 drivers/gpu/drm/i915/i915_gem_tiling.c        | 6 +++---
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 5 ++---
 4 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b7086c8d47263..92389825a7ff3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4195,7 +4195,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		drm_gem_object_release(&obj->base);
 		i915_gem_info_remove_obj(i915, obj->base.size);
 
-		kfree(obj->bit_17);
+		bitmap_free(obj->bit_17);
 		i915_gem_object_free(obj);
 
 		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 9418ad499b7e5..3084f52e33728 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -765,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
-				      sizeof(long), GFP_KERNEL);
+		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 16cc9ddbce34a..a9b5329dae3bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		if (!obj->bit_17) {
-			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
-					      sizeof(long), GFP_KERNEL);
+			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+						    GFP_KERNEL);
 		}
 	} else {
-		kfree(obj->bit_17);
+		bitmap_free(obj->bit_17);
 		obj->bit_17 = NULL;
 	}
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 099486235936b..f9f5672b27c41 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -258,8 +258,7 @@ static int live_forcewake_domains(void *arg)
 	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
 		return 0;
 
-	valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
-			GFP_KERNEL);
+	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
 	if (!valid)
 		return -ENOMEM;
 
@@ -294,7 +293,7 @@ static int live_forcewake_domains(void *arg)
 		}
 	}
 
-	kfree(valid);
+	bitmap_free(valid);
 	return err;
 }
 

From 7264aebb81d15aa6bbed650c816bba90f026bc35 Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Tue, 19 Mar 2019 15:18:47 -0700
Subject: [PATCH 18/95] drm/i915/icl: Fix the TRANS_DDI_FUNC_CTL2 bitfield
 macro

This patch fixes the PORT_SYNC_MODE_MASTER_SELECT macro to do a
left shift rather than a less-than comparison, so that the port
sync master select is set correctly.
I have tested this fix on ICL.

Fixes: 49edbd49786e ("drm/i915/icl: Define TRANS_DDI_FUNC_CTL DSI registers")
Cc: Madhav Chauhan <madhav.chauhan@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: <stable@vger.kernel.org> # v5.0+
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319221847.21311-1-manasi.d.navare@intel.com
---
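For illustration (using 2 as an arbitrary master transcoder value), the
old definition was a comparison rather than a shift, so the 3-bit master
select field was always written as 0:

    PORT_SYNC_MODE_MASTER_SELECT(2)   /* old: (2) <  0 == 0 */
    PORT_SYNC_MODE_MASTER_SELECT(2)   /* new: (2) << 0 == 2 */
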
 drivers/gpu/drm/i915/i915_reg.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 85e8d1a1f70b2..7a9d867eb49b8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9352,7 +9352,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)	_MMIO_TRANS2(tran, \
 						     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE			(1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT	0
 

From 159367bb9e7439b8c1c4c066596a1663b901647b Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 20 Mar 2019 12:27:32 +0000
Subject: [PATCH 19/95] drm/i915: always use masks on FW regs

Upper bits are reserved on gen6, so no issue if we write them. Note that
we're already doing this in the non-MT case of IVB, which uses the same
register.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190320122732.14512-1-chris@chris-wilson.co.uk
---
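For context, on the multithreaded forcewake registers these are
masked-bit writes: the upper 16 bits select which of the lower 16 bits
the write updates (roughly what the _MASKED_BIT_* helpers expand to):

    fw_set(d, FORCEWAKE_KERNEL)     /* ~ writel((bit << 16) | bit, ...) */
    fw_clear(d, FORCEWAKE_KERNEL)   /* ~ writel(bit << 16, ...) */

On gen6 the upper half is reserved, so the same masked form is harmless
there too, which is what lets us drop the per-platform fw_set/fw_clear/
fw_reset values.
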
 drivers/gpu/drm/i915/intel_uncore.c | 44 +++++++++++------------------
 drivers/gpu/drm/i915/intel_uncore.h |  4 ---
 2 files changed, 16 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 7129eebc333bc..b4cea3cf3915b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -59,18 +59,19 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
 }
 
 #define fw_ack(d) readl((d)->reg_ack)
-#define fw_set(d, val) writel((val), (d)->reg_set)
+#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
 
 static inline void
-fw_domain_reset(const struct intel_uncore *uncore,
-		const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 {
 	/*
 	 * We don't really know if the powerwell for the forcewake domain we are
 	 * trying to reset here does exist at this point (engines could be fused
 	 * off in ICL+), so no waiting for acks
 	 */
-	fw_set(d, uncore->fw_reset);
+	/* WaRsClearFWBitsAtReset:bdw,skl */
+	fw_clear(d, 0xffff);
 }
 
 static inline void
@@ -146,14 +147,14 @@ fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
 	do {
 		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
 
-		fw_set(d, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
 		/* Give gt some time to relax before the polling frenzy */
 		udelay(10 * pass);
 		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
 
 		ack_detected = (fw_ack(d) & ack_bit) == value;
 
-		fw_set(d, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
 	} while (!ack_detected && pass++ < 10);
 
 	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
@@ -176,10 +177,9 @@ fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
 }
 
 static inline void
-fw_domain_get(const struct intel_uncore *uncore,
-	      const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
 {
-	fw_set(d, uncore->fw_set);
+	fw_set(d, FORCEWAKE_KERNEL);
 }
 
 static inline void
@@ -201,10 +201,9 @@ fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
 }
 
 static inline void
-fw_domain_put(const struct intel_uncore *uncore,
-	      const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
 {
-	fw_set(d, uncore->fw_clear);
+	fw_clear(d, FORCEWAKE_KERNEL);
 }
 
 static void
@@ -218,7 +217,7 @@ fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
 		fw_domain_wait_ack_clear(d);
-		fw_domain_get(uncore, d);
+		fw_domain_get(d);
 	}
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
@@ -239,7 +238,7 @@ fw_domains_get_with_fallback(struct drm_i915_private *i915,
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
 		fw_domain_wait_ack_clear_fallback(d);
-		fw_domain_get(uncore, d);
+		fw_domain_get(d);
 	}
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
@@ -258,7 +257,7 @@ fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_put(uncore, d);
+		fw_domain_put(d);
 
 	uncore->fw_domains_active &= ~fw_domains;
 }
@@ -277,7 +276,7 @@ fw_domains_reset(struct drm_i915_private *i915,
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_reset(uncore, d);
+		fw_domain_reset(d);
 }
 
 static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
@@ -1371,7 +1370,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 
 	uncore->fw_domains |= BIT(domain_id);
 
-	fw_domain_reset(uncore, d);
+	fw_domain_reset(d);
 }
 
 static void fw_domain_fini(struct drm_i915_private *dev_priv,
@@ -1396,17 +1395,6 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
 		return;
 
-	if (IS_GEN(dev_priv, 6)) {
-		dev_priv->uncore.fw_reset = 0;
-		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
-		dev_priv->uncore.fw_clear = 0;
-	} else {
-		/* WaRsClearFWBitsAtReset:bdw,skl */
-		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
-		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
-		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
-	}
-
 	if (INTEL_GEN(dev_priv) >= 11) {
 		int i;
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b0a95469babff..579a7f6b84453 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -106,10 +106,6 @@ struct intel_uncore {
 	enum forcewake_domains fw_domains_active;
 	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
 
-	u32 fw_set;
-	u32 fw_clear;
-	u32 fw_reset;
-
 	struct intel_uncore_forcewake_domain {
 		enum forcewake_domain_id id;
 		enum forcewake_domains mask;

From f568eeee535597dfcf7b84169b135ada8696ba59 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:35 -0700
Subject: [PATCH 20/95] drm/i915: use intel_uncore in fw get/put internal paths

Convert the forcewake get/put internal paths to operate on intel_uncore
rather than on dev_priv. Get/put functions used outside of
intel_uncore.c are updated in the next patch for a nicer split.

v2: use dev_priv where we still have it (Paulo)

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-3-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c           |   5 +-
 drivers/gpu/drm/i915/i915_drv.c               |   2 +-
 drivers/gpu/drm/i915/i915_drv.h               |   5 +
 drivers/gpu/drm/i915/intel_uncore.c           | 191 +++++++++---------
 drivers/gpu/drm/i915/intel_uncore.h           |  17 +-
 drivers/gpu/drm/i915/selftests/intel_uncore.c |   9 +-
 6 files changed, 121 insertions(+), 108 deletions(-)
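
An illustrative sketch (not driver code) of the calling pattern this patch
moves to: internal helpers take struct intel_uncore * directly and climb back
to the enclosing i915 device only where a legacy helper still needs it, via a
container_of()-based helper like the uncore_to_i915() added above. All
*_sketch names and struct layouts below are stand-ins for illustration, and
container_of() is reimplemented only so the snippet builds standalone.

/* build with: cc -o fw_sketch fw_sketch.c */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_uncore_sketch {
	unsigned int fw_domains_active;
};

struct drm_i915_private_sketch {
	unsigned int device_id;
	struct intel_uncore_sketch uncore;
};

/* Same shape as uncore_to_i915(): container_of() back to the i915 device. */
static struct drm_i915_private_sketch *
uncore_to_i915_sketch(struct intel_uncore_sketch *uncore)
{
	return container_of(uncore, struct drm_i915_private_sketch, uncore);
}

/* Internal path: needs nothing beyond the uncore itself. */
static void fw_domains_get_sketch(struct intel_uncore_sketch *uncore,
				  unsigned int fw_domains)
{
	uncore->fw_domains_active |= fw_domains;
}

/* Climbs back to dev_priv only where an i915-level helper is still used. */
static void
fw_domains_get_with_thread_status_sketch(struct intel_uncore_sketch *uncore,
					 unsigned int fw_domains)
{
	fw_domains_get_sketch(uncore, fw_domains);
	printf("wait for GT threads on device %#x\n",
	       uncore_to_i915_sketch(uncore)->device_id);
}

int main(void)
{
	struct drm_i915_private_sketch i915 = { .device_id = 0x3e9b };

	fw_domains_get_with_thread_status_sketch(&i915.uncore, 0x1);
	printf("active domains: %#x\n", i915.uncore.fw_domains_active);
	return 0;
}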

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 08683dca77750..a52b7cf1525d7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1414,13 +1414,14 @@ static int ironlake_drpc_info(struct seq_file *m)
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *fw_domain;
 	unsigned int tmp;
 
 	seq_printf(m, "user.bypass_count = %u\n",
-		   i915->uncore.user_forcewake.count);
+		   uncore->user_forcewake.count);
 
-	for_each_fw_domain(fw_domain, i915, tmp)
+	for_each_fw_domain(fw_domain, uncore, tmp)
 		seq_printf(m, "%s.wake_count = %u\n",
 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
 			   READ_ONCE(fw_domain->wake_count));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b3d2524ce7cc8..4ec41c3a90388 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2952,7 +2952,7 @@ static int intel_runtime_suspend(struct device *kdev)
 		intel_opregion_notify_adapter(dev_priv, PCI_D1);
 	}
 
-	assert_forcewakes_inactive(dev_priv);
+	assert_forcewakes_inactive(&dev_priv->uncore);
 
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_poll_init(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 363b2d3e4d50b..b9ac9183fbffa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2102,6 +2102,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 	return container_of(huc, struct drm_i915_private, huc);
 }
 
+static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
+{
+	return container_of(uncore, struct drm_i915_private, uncore);
+}
+
 /* Simple iterator over all initialised engines */
 #define for_each_engine(engine__, dev_priv__, id__) \
 	for ((id__) = 0; \
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b4cea3cf3915b..ebbb9af970dc7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -207,66 +207,62 @@ fw_domain_put(const struct intel_uncore_forcewake_domain *d)
 }
 
 static void
-fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
 		fw_domain_wait_ack_clear(d);
 		fw_domain_get(d);
 	}
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
 		fw_domain_wait_ack_set(d);
 
 	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
-fw_domains_get_with_fallback(struct drm_i915_private *i915,
+fw_domains_get_with_fallback(struct intel_uncore *uncore,
 			     enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
 		fw_domain_wait_ack_clear_fallback(d);
 		fw_domain_get(d);
 	}
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
 		fw_domain_wait_ack_set_fallback(d);
 
 	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
-fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
 		fw_domain_put(d);
 
 	uncore->fw_domains_active &= ~fw_domains;
 }
 
 static void
-fw_domains_reset(struct drm_i915_private *i915,
+fw_domains_reset(struct intel_uncore *uncore,
 		 enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
@@ -275,7 +271,7 @@ fw_domains_reset(struct drm_i915_private *i915,
 
 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
 		fw_domain_reset(d);
 }
 
@@ -299,13 +295,13 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 		  "GT thread status wait timed out\n");
 }
 
-static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
 					      enum forcewake_domains fw_domains)
 {
-	fw_domains_get(dev_priv, fw_domains);
+	fw_domains_get(uncore, fw_domains);
 
 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
-	__gen6_gt_wait_for_thread_c0(dev_priv);
+	__gen6_gt_wait_for_thread_c0(uncore_to_i915(uncore));
 }
 
 static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
@@ -343,30 +339,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 {
 	struct intel_uncore_forcewake_domain *domain =
 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
-	struct drm_i915_private *dev_priv =
-		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
+	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
 	unsigned long irqflags;
 
-	assert_rpm_device_not_suspended(dev_priv);
+	assert_rpm_device_not_suspended(uncore_to_i915(uncore));
 
 	if (xchg(&domain->active, false))
 		return HRTIMER_RESTART;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
 	if (--domain->wake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+		uncore->funcs.force_wake_put(uncore, domain->mask);
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 
 	return HRTIMER_NORESTART;
 }
 
 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
 static unsigned int
-intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
+intel_uncore_forcewake_reset(struct intel_uncore *uncore)
 {
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
@@ -384,7 +379,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 
 		active_domains = 0;
 
-		for_each_fw_domain(domain, dev_priv, tmp) {
+		for_each_fw_domain(domain, uncore, tmp) {
 			smp_store_mb(domain->active, false);
 			if (hrtimer_cancel(&domain->timer) == 0)
 				continue;
@@ -392,9 +387,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 			intel_uncore_fw_release_timer(&domain->timer);
 		}
 
-		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+		spin_lock_irqsave(&uncore->lock, irqflags);
 
-		for_each_fw_domain(domain, dev_priv, tmp) {
+		for_each_fw_domain(domain, uncore, tmp) {
 			if (hrtimer_active(&domain->timer))
 				active_domains |= domain->mask;
 		}
@@ -407,20 +402,20 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 			break;
 		}
 
-		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+		spin_unlock_irqrestore(&uncore->lock, irqflags);
 		cond_resched();
 	}
 
 	WARN_ON(active_domains);
 
-	fw = dev_priv->uncore.fw_domains_active;
+	fw = uncore->fw_domains_active;
 	if (fw)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+		uncore->funcs.force_wake_put(uncore, fw);
 
-	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-	assert_forcewakes_inactive(dev_priv);
+	fw_domains_reset(uncore, uncore->fw_domains);
+	assert_forcewakes_inactive(uncore);
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 
 	return fw; /* track the lost user forcewake domains */
 }
@@ -546,10 +541,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(dev_priv);
+	intel_uncore_forcewake_reset(&dev_priv->uncore);
 	if (restore_forcewake) {
 		spin_lock_irq(&dev_priv->uncore.lock);
-		dev_priv->uncore.funcs.force_wake_get(dev_priv,
+		dev_priv->uncore.funcs.force_wake_get(&dev_priv->uncore,
 						      restore_forcewake);
 
 		if (IS_GEN_RANGE(dev_priv, 6, 7))
@@ -566,7 +561,7 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&dev_priv->uncore.pmic_bus_access_nb);
 	dev_priv->uncore.fw_domains_saved =
-		intel_uncore_forcewake_reset(dev_priv);
+		intel_uncore_forcewake_reset(&dev_priv->uncore);
 	iosf_mbi_punit_release();
 }
 
@@ -594,15 +589,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 	intel_sanitize_gt_powersave(dev_priv);
 }
 
-static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
 					 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
 
-	fw_domains &= dev_priv->uncore.fw_domains;
+	fw_domains &= uncore->fw_domains;
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
 		if (domain->wake_count++) {
 			fw_domains &= ~domain->mask;
 			domain->active = true;
@@ -610,7 +605,7 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 	}
 
 	if (fw_domains)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+		uncore->funcs.force_wake_get(uncore, fw_domains);
 }
 
 /**
@@ -629,16 +624,17 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 				enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long irqflags;
 
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
 	assert_rpm_wakelock_held(dev_priv);
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	__intel_uncore_forcewake_get(dev_priv, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_get(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
 /**
@@ -651,20 +647,22 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
  */
 void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
 {
-	spin_lock_irq(&dev_priv->uncore.lock);
-	if (!dev_priv->uncore.user_forcewake.count++) {
+	struct intel_uncore *uncore = &dev_priv->uncore;
+
+	spin_lock_irq(&uncore->lock);
+	if (!uncore->user_forcewake.count++) {
 		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 
 		/* Save and disable mmio debugging for the user bypass */
-		dev_priv->uncore.user_forcewake.saved_mmio_check =
-			dev_priv->uncore.unclaimed_mmio_check;
-		dev_priv->uncore.user_forcewake.saved_mmio_debug =
+		uncore->user_forcewake.saved_mmio_check =
+			uncore->unclaimed_mmio_check;
+		uncore->user_forcewake.saved_mmio_debug =
 			i915_modparams.mmio_debug;
 
-		dev_priv->uncore.unclaimed_mmio_check = 0;
+		uncore->unclaimed_mmio_check = 0;
 		i915_modparams.mmio_debug = 0;
 	}
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	spin_unlock_irq(&uncore->lock);
 }
 
 /**
@@ -676,20 +674,22 @@ void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
  */
 void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
 {
-	spin_lock_irq(&dev_priv->uncore.lock);
-	if (!--dev_priv->uncore.user_forcewake.count) {
+	struct intel_uncore *uncore = &dev_priv->uncore;
+
+	spin_lock_irq(&uncore->lock);
+	if (!--uncore->user_forcewake.count) {
 		if (intel_uncore_unclaimed_mmio(dev_priv))
 			dev_info(dev_priv->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
-		dev_priv->uncore.unclaimed_mmio_check =
-			dev_priv->uncore.user_forcewake.saved_mmio_check;
+		uncore->unclaimed_mmio_check =
+			uncore->user_forcewake.saved_mmio_check;
 		i915_modparams.mmio_debug =
-			dev_priv->uncore.user_forcewake.saved_mmio_debug;
+			uncore->user_forcewake.saved_mmio_debug;
 
 		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	}
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	spin_unlock_irq(&uncore->lock);
 }
 
 /**
@@ -703,23 +703,25 @@ void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
 					enum forcewake_domains fw_domains)
 {
-	lockdep_assert_held(&dev_priv->uncore.lock);
+	struct intel_uncore *uncore = &dev_priv->uncore;
 
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	lockdep_assert_held(&uncore->lock);
+
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+	__intel_uncore_forcewake_get(uncore, fw_domains);
 }
 
-static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
 					 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
 
-	fw_domains &= dev_priv->uncore.fw_domains;
+	fw_domains &= uncore->fw_domains;
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
 		if (WARN_ON(domain->wake_count == 0))
 			continue;
 
@@ -743,14 +745,15 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 				enum forcewake_domains fw_domains)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long irqflags;
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
+	if (!uncore->funcs.force_wake_put)
 		return;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	__intel_uncore_forcewake_put(dev_priv, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_put(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
 /**
@@ -764,36 +767,38 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 					enum forcewake_domains fw_domains)
 {
-	lockdep_assert_held(&dev_priv->uncore.lock);
+	struct intel_uncore *uncore = &dev_priv->uncore;
+
+	lockdep_assert_held(&uncore->lock);
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
+	if (!uncore->funcs.force_wake_put)
 		return;
 
-	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+	__intel_uncore_forcewake_put(uncore, fw_domains);
 }
 
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct intel_uncore *uncore)
 {
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	WARN(dev_priv->uncore.fw_domains_active,
+	WARN(uncore->fw_domains_active,
 	     "Expected all fw_domains to be inactive, but %08x are still on\n",
-	     dev_priv->uncore.fw_domains_active);
+	     uncore->fw_domains_active);
 }
 
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_active(struct intel_uncore *uncore,
 			      enum forcewake_domains fw_domains)
 {
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(uncore_to_i915(uncore));
 
-	fw_domains &= dev_priv->uncore.fw_domains;
-	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+	fw_domains &= uncore->fw_domains;
+	WARN(fw_domains & ~uncore->fw_domains_active,
 	     "Expected %08x fw_domains to be active, but %08x are off\n",
-	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
+	     fw_domains, fw_domains & ~uncore->fw_domains_active);
 }
 
 /* We give fast paths for the really cool registers */
@@ -1157,32 +1162,32 @@ __gen2_read(64)
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
 
-static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+static noinline void ___force_wake_auto(struct intel_uncore *uncore,
 					enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
 		fw_domain_arm_timer(domain);
 
-	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+	uncore->funcs.force_wake_get(uncore, fw_domains);
 }
 
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+static inline void __force_wake_auto(struct intel_uncore *uncore,
 				     enum forcewake_domains fw_domains)
 {
 	if (WARN_ON(!fw_domains))
 		return;
 
 	/* Turn on all requested but inactive supported forcewake domains. */
-	fw_domains &= dev_priv->uncore.fw_domains;
-	fw_domains &= ~dev_priv->uncore.fw_domains_active;
+	fw_domains &= uncore->fw_domains;
+	fw_domains &= ~uncore->fw_domains_active;
 
 	if (fw_domains)
-		___force_wake_auto(dev_priv, fw_domains);
+		___force_wake_auto(uncore, fw_domains);
 }
 
 #define __gen_read(func, x) \
@@ -1192,7 +1197,7 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
 	GEN6_READ_HEADER(x); \
 	fw_engine = __##func##_reg_read_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
+		__force_wake_auto(&dev_priv->uncore, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
@@ -1284,7 +1289,7 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
 	GEN6_WRITE_HEADER; \
 	fw_engine = __##func##_reg_write_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
+		__force_wake_auto(&dev_priv->uncore, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
@@ -1478,9 +1483,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
 		spin_lock_irq(&dev_priv->uncore.lock);
-		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
+		fw_domains_get_with_thread_status(&dev_priv->uncore, FORCEWAKE_RENDER);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
+		fw_domains_put(&dev_priv->uncore, FORCEWAKE_RENDER);
 		spin_unlock_irq(&dev_priv->uncore.lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
@@ -1634,7 +1639,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(dev_priv);
+	intel_uncore_forcewake_reset(&dev_priv->uncore);
 	iosf_mbi_punit_release();
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 579a7f6b84453..73b1bd9a391da 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -32,6 +32,7 @@
 #include "i915_reg.h"
 
 struct drm_i915_private;
+struct intel_uncore;
 
 enum forcewake_domain_id {
 	FW_DOMAIN_ID_RENDER = 0,
@@ -62,9 +63,9 @@ enum forcewake_domains {
 };
 
 struct intel_uncore_funcs {
-	void (*force_wake_get)(struct drm_i915_private *dev_priv,
+	void (*force_wake_get)(struct intel_uncore *uncore,
 			       enum forcewake_domains domains);
-	void (*force_wake_put)(struct drm_i915_private *dev_priv,
+	void (*force_wake_put)(struct intel_uncore *uncore,
 			       enum forcewake_domains domains);
 
 	u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
@@ -127,12 +128,12 @@ struct intel_uncore {
 };
 
 /* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
 	for (tmp__ = (mask__); \
-	     tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+	     tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
 
-#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
-	for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
+#define for_each_fw_domain(domain__, uncore__, tmp__) \
+	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
 
 static inline struct intel_uncore *
 forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
@@ -151,8 +152,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
 void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_inactive(struct intel_uncore *uncore);
+void assert_forcewakes_active(struct intel_uncore *uncore,
 			      enum forcewake_domains fw_domains);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index f9f5672b27c41..152cad02635dd 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -140,6 +140,7 @@ static int live_forcewake_ops(void *arg)
 	const struct reg *r;
 	struct drm_i915_private *i915 = arg;
 	struct intel_uncore_forcewake_domain *domain;
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
@@ -166,7 +167,7 @@ static int live_forcewake_ops(void *arg)
 
 	wakeref = intel_runtime_pm_get(i915);
 
-	for_each_fw_domain(domain, i915, tmp) {
+	for_each_fw_domain(domain, uncore, tmp) {
 		smp_store_mb(domain->active, false);
 		if (!hrtimer_cancel(&domain->timer))
 			continue;
@@ -188,7 +189,7 @@ static int live_forcewake_ops(void *arg)
 		if (!fw_domains)
 			continue;
 
-		for_each_fw_domain_masked(domain, fw_domains, i915, tmp) {
+		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
 			if (!domain->wake_count)
 				continue;
 
@@ -203,7 +204,7 @@ static int live_forcewake_ops(void *arg)
 		intel_uncore_forcewake_put(i915, fw_domains);
 
 		/* Flush the forcewake release (delayed onto a timer) */
-		for_each_fw_domain_masked(domain, fw_domains, i915, tmp) {
+		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
 			smp_store_mb(domain->active, false);
 			if (hrtimer_cancel(&domain->timer))
 				intel_uncore_fw_release_timer(&domain->timer);
@@ -280,7 +281,7 @@ static int live_forcewake_domains(void *arg)
 		i915_reg_t reg = { offset };
 
 		iosf_mbi_punit_acquire();
-		intel_uncore_forcewake_reset(dev_priv);
+		intel_uncore_forcewake_reset(&dev_priv->uncore);
 		iosf_mbi_punit_release();
 
 		check_for_unclaimed_mmio(dev_priv);

From 3ceea6a1b4d2426b49a9ebcc099cc147dc68e20b Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:36 -0700
Subject: [PATCH 21/95] drm/i915: use intel_uncore for all forcewake get/put

Now that the internal code all works on intel_uncore, flip the
external-facing interface.

v2: fix GVT.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-4-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/gvt/mmio_context.c       |  8 +--
 drivers/gpu/drm/i915/gvt/scheduler.c          |  4 +-
 drivers/gpu/drm/i915/i915_debugfs.c           | 12 ++---
 drivers/gpu/drm/i915/i915_gem.c               | 20 +++----
 drivers/gpu/drm/i915/i915_perf.c              |  6 +--
 drivers/gpu/drm/i915/i915_reset.c             | 12 ++---
 drivers/gpu/drm/i915/intel_display.c          |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        |  4 +-
 drivers/gpu/drm/i915/intel_guc.c              |  8 +--
 drivers/gpu/drm/i915/intel_guc_fw.c           |  4 +-
 drivers/gpu/drm/i915/intel_huc_fw.c           |  4 +-
 drivers/gpu/drm/i915/intel_pm.c               | 52 +++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.c       |  8 +--
 drivers/gpu/drm/i915/intel_uncore.c           | 52 ++++++++-----------
 drivers/gpu/drm/i915/intel_uncore.h           | 12 ++---
 drivers/gpu/drm/i915/intel_workarounds.c      |  4 +-
 drivers/gpu/drm/i915/selftests/intel_uncore.c |  8 +--
 17 files changed, 107 insertions(+), 115 deletions(-)
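
A caller-side sketch (stand-in types, not the real driver API) of what this
interface flip looks like at a call site: users wrap their register access in
get/put on the embedded uncore, &dev_priv->uncore, instead of passing dev_priv
itself. Every *_sketch and *_SKETCH name below is hypothetical and exists only
so the snippet compiles on its own.

#include <stdio.h>

enum forcewake_domains_sketch { FORCEWAKE_ALL_SKETCH = 0x7 };

struct intel_uncore_sketch {
	unsigned int fw_domains_active;
};

struct drm_i915_private_sketch {
	struct intel_uncore_sketch uncore;
};

static void
intel_uncore_forcewake_get_sketch(struct intel_uncore_sketch *uncore,
				  enum forcewake_domains_sketch fw)
{
	uncore->fw_domains_active |= fw;	/* wake the requested domains */
}

static void
intel_uncore_forcewake_put_sketch(struct intel_uncore_sketch *uncore,
				  enum forcewake_domains_sketch fw)
{
	uncore->fw_domains_active &= ~fw;	/* release them again */
}

/* Typical converted caller: forcewake is taken and dropped on the uncore. */
static void read_gt_register_sketch(struct drm_i915_private_sketch *dev_priv)
{
	intel_uncore_forcewake_get_sketch(&dev_priv->uncore, FORCEWAKE_ALL_SKETCH);
	printf("mmio access with domains %#x awake\n",
	       dev_priv->uncore.fw_domains_active);
	intel_uncore_forcewake_put_sketch(&dev_priv->uncore, FORCEWAKE_ALL_SKETCH);
}

int main(void)
{
	struct drm_i915_private_sketch i915 = { { 0 } };

	read_gt_register_sketch(&i915);
	return 0;
}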

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index f64c76dd11d4f..a00a807a1d559 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -356,7 +356,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
 		fw |= FORCEWAKE_RENDER;
 
-	intel_uncore_forcewake_get(dev_priv, fw);
+	intel_uncore_forcewake_get(&dev_priv->uncore, fw);
 
 	I915_WRITE_FW(reg, 0x1);
 
@@ -365,7 +365,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	else
 		vgpu_vreg_t(vgpu, reg) = 0;
 
-	intel_uncore_forcewake_put(dev_priv, fw);
+	intel_uncore_forcewake_put(&dev_priv->uncore, fw);
 
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }
@@ -552,9 +552,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7550e09939aea..3faf2438b9bcf 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -988,7 +988,7 @@ static int workload_thread(void *priv)
 				workload->ring_id, workload);
 
 		if (need_force_wake)
-			intel_uncore_forcewake_get(gvt->dev_priv,
+			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
 
 		ret = dispatch_workload(workload);
@@ -1010,7 +1010,7 @@ static int workload_thread(void *priv)
 		complete_current_workload(gvt, ring_id);
 
 		if (need_force_wake)
-			intel_uncore_forcewake_put(gvt->dev_priv,
+			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
 
 		intel_runtime_pm_put_unchecked(gvt->dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a52b7cf1525d7..0dd8b3fa7fb98 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1094,7 +1094,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		}
 
 		/* RPSTAT1 is in the GT power well */
-		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		if (INTEL_GEN(dev_priv) >= 9)
@@ -1122,7 +1122,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		cagf = intel_gpu_freq(dev_priv,
 				      intel_get_cagf(dev_priv, rpstat));
 
-		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		if (INTEL_GEN(dev_priv) >= 11) {
 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@@ -2060,12 +2060,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
 
-		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
-		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
 			   rps_power_to_str(rps->power.mode));
@@ -4251,7 +4251,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 		return 0;
 
 	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
-	intel_uncore_forcewake_user_get(i915);
+	intel_uncore_forcewake_user_get(&i915->uncore);
 
 	return 0;
 }
@@ -4263,7 +4263,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 	if (INTEL_GEN(i915) < 6)
 		return 0;
 
-	intel_uncore_forcewake_user_put(i915);
+	intel_uncore_forcewake_user_put(&i915->uncore);
 	intel_runtime_pm_put(i915,
 			     (intel_wakeref_t)(uintptr_t)file->private_data);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92389825a7ff3..1a684b7e8c094 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4318,7 +4318,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	GEM_TRACE("\n");
 
 	wakeref = intel_runtime_pm_get(i915);
-	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
 	/*
 	 * As we have just resumed the machine and woken the device up from
@@ -4339,7 +4339,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 */
 	intel_engines_sanitize(i915, false);
 
-	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(i915, wakeref);
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -4438,7 +4438,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	WARN_ON(i915->gt.awake);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
@@ -4460,7 +4460,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 		goto err_wedged;
 
 out_unlock:
-	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return;
 
@@ -4549,7 +4549,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	dev_priv->gt.last_init_time = ktime_get();
 
 	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4604,14 +4604,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto cleanup_uc;
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	return 0;
 
 cleanup_uc:
 	intel_uc_fini_hw(dev_priv);
 out:
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -4815,7 +4815,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * just magically go away.
 	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	ret = i915_gem_init_ggtt(dev_priv);
 	if (ret) {
@@ -4877,7 +4877,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_init_hw;
 	}
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
@@ -4912,7 +4912,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 err_uc_misc:
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9b0292a38865f..e0fcb982a14f3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 
 	free_oa_buffer(dev_priv);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(dev_priv, stream->wakeref);
 
 	if (stream->ctx)
@@ -2093,7 +2093,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	 *   references will effectively disable RC6.
 	 */
 	stream->wakeref = intel_runtime_pm_get(dev_priv);
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	ret = alloc_oa_buffer(dev_priv);
 	if (ret)
@@ -2127,7 +2127,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 err_oa_buf_alloc:
 	put_oa_config(dev_priv, stream->oa_config);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(dev_priv, stream->wakeref);
 
 err_config:
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index fb86d5ca5d8b8..0aea19cefe4ac 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -569,7 +569,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
 	 * If the power well sleeps during the reset, the reset
 	 * request may be dropped and never completes (causing -EIO).
 	 */
-	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
 		/*
 		 * We stop engines, otherwise we might get failed reset and a
@@ -593,7 +593,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
 		ret = reset(i915, engine_mask, retry);
 		preempt_enable();
 	}
-	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -622,9 +622,9 @@ int intel_reset_guc(struct drm_i915_private *i915)
 
 	GEM_BUG_ON(!HAS_GUC(i915));
 
-	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 	ret = gen6_hw_domain_reset(i915, guc_domain);
-	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -642,7 +642,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	 * written to the powercontext is undefined and so we may lose
 	 * GPU state upon resume, i.e. fail to restart after a reset.
 	 */
-	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
 	engine->reset.prepare(engine);
 }
 
@@ -713,7 +713,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
 	engine->reset.finish(engine);
-	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
 }
 
 struct i915_gpu_restart {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01cdd6e745c3d..17fb9f2c15c28 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9519,7 +9519,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	 * Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
 	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -9551,7 +9551,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	intel_update_cdclk(dev_priv);
 	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 652c1b3ba1900..588c640b5a572 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -919,7 +919,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 						     FW_REG_READ | FW_REG_WRITE);
 
 	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
 
 	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
 
@@ -937,7 +937,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 
 	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
 
-	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
 	spin_unlock_irq(&dev_priv->uncore.lock);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8ecb470874570..a59448a56f55e 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -369,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc)
 	 * they are power context saved so it's ok to release forcewake
 	 * when we are done here and take it again at xfer time.
 	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
 
 	I915_WRITE(SOFT_SCRATCH(0), 0);
 
 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
 		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
 }
 
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -414,7 +414,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
 
 	mutex_lock(&guc->send_mutex);
-	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+	intel_uncore_forcewake_get(&dev_priv->uncore, guc->send_regs.fw_domains);
 
 	for (i = 0; i < len; i++)
 		I915_WRITE(guc_send_reg(guc, i), action[i]);
@@ -454,7 +454,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	ret = INTEL_GUC_MSG_TO_DATA(status);
 
 out:
-	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+	intel_uncore_forcewake_put(&dev_priv->uncore, guc->send_regs.fw_domains);
 	mutex_unlock(&guc->send_mutex);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 13ff7003c6bef..792a551450c72 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 
 	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	guc_prepare_xfer(guc);
 
@@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 
 	ret = guc_xfer_ucode(guc, vma);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 7d7bfc7f7ca70..92799b8502f51 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -112,7 +112,7 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 
 	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* Set the source address for the uCode */
 	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
@@ -140,7 +140,7 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 	/* Disable the bits once DMA is over */
 	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53e9304c66ab8..fcd3baff8b65c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6781,9 +6781,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 	 * punit into committing the voltage change) as that takes a lot less
 	 * power than the render powerwell.
 	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
 	err = valleyview_set_rps(dev_priv, val);
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
 
 	if (err)
 		DRM_ERROR("Failed to set RPS for idle\n");
@@ -6933,11 +6933,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
 {
 	/* We're doing forcewake before Disabling RC6,
 	 * This what the BIOS expects when going into suspend */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@@ -7096,7 +7096,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* Program defaults and thresholds for RPS */
 	if (IS_GEN(dev_priv, 9))
@@ -7114,7 +7114,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
 	reset_rps(dev_priv, gen6_set_rps);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7128,7 +7128,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 
 	/* 1b: Get forcewake during program sequence. Although the driver
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7205,7 +7205,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 		I915_WRITE(GEN9_PG_ENABLE,
 			   GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7218,7 +7218,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
 
 	/* 1b: Get forcewake during program sequence. Although the driver
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7239,14 +7239,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
 		   GEN7_RC_CTL_TO_MODE |
 		   GEN6_RC_CTL_RC6_ENABLE);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* 1 Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -7279,7 +7279,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 
 	reset_rps(dev_priv, gen6_set_rps);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7299,7 +7299,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7347,7 +7347,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
 	}
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -7358,7 +7358,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * Perhaps there might be some value in exposing these to
 	 * userspace...
 	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* Power down if completely idle for over 50ms */
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
@@ -7366,7 +7366,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
 	reset_rps(dev_priv, gen6_set_rps);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -7789,7 +7789,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
 
 	/* 1a & 1b: Get forcewake during program sequence. Although the driver
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/*  Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7821,14 +7821,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
 		rc6_mode = GEN7_RC_CTL_TO_MODE;
 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/* 1: Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@@ -7863,7 +7863,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 
 	reset_rps(dev_priv, valleyview_set_rps);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7881,7 +7881,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	/*  Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7906,14 +7906,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_CONTROL,
 		   GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -7947,7 +7947,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 
 	reset_rps(dev_priv, valleyview_set_rps);
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -9999,7 +9999,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
 
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -10040,7 +10040,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
 	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
 
-	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
 	return mul_u64_u32_div(time_hw, mul, div);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9e7ad17b5250d..720d39729ead8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -640,7 +640,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	struct intel_ring *ring = engine->buffer;
 	int ret = 0;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	if (!stop_ring(engine)) {
 		/* G45 ring initialization often fails to reset head to zero */
@@ -727,7 +727,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	/* Papering over lost _interrupts_ immediately following the restart */
 	intel_engine_queue_breadcrumbs(engine);
 out:
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -2075,7 +2075,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 {
 	struct drm_i915_private *dev_priv = request->i915;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
        /* Every tail move must follow the sequence below */
 
@@ -2105,7 +2105,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
 		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 }
 
 static int mi_flush_dw(struct i915_request *rq, u32 flags)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ebbb9af970dc7..2e50697e72b43 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -610,7 +610,7 @@ static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
 
 /**
  * intel_uncore_forcewake_get - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * This function can be used get GT's forcewake domain references.
@@ -621,16 +621,15 @@ static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
  * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
  * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
  */
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 				enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long irqflags;
 
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(uncore_to_i915(uncore));
 
 	spin_lock_irqsave(&uncore->lock, irqflags);
 	__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -639,19 +638,17 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 
 /**
  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  *
  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
  * the GT powerwell and in the process disable our debugging for the
  * duration of userspace's bypass.
  */
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
 	spin_lock_irq(&uncore->lock);
 	if (!uncore->user_forcewake.count++) {
-		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
 
 		/* Save and disable mmio debugging for the user bypass */
 		uncore->user_forcewake.saved_mmio_check =
@@ -667,19 +664,19 @@ void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
 
 /**
  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  *
  * This function complements intel_uncore_forcewake_user_get() and releases
  * the GT powerwell taken on behalf of the userspace bypass.
  */
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
 	spin_lock_irq(&uncore->lock);
 	if (!--uncore->user_forcewake.count) {
-		if (intel_uncore_unclaimed_mmio(dev_priv))
-			dev_info(dev_priv->drm.dev,
+		if (intel_uncore_unclaimed_mmio(i915))
+			dev_info(i915->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
 		uncore->unclaimed_mmio_check =
@@ -687,24 +684,22 @@ void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
 		i915_modparams.mmio_debug =
 			uncore->user_forcewake.saved_mmio_debug;
 
-		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 	}
 	spin_unlock_irq(&uncore->lock);
 }
 
 /**
  * intel_uncore_forcewake_get__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * See intel_uncore_forcewake_get(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
  */
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
 					enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
 	lockdep_assert_held(&uncore->lock);
 
 	if (!uncore->funcs.force_wake_get)
@@ -736,16 +731,15 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
 
 /**
  * intel_uncore_forcewake_put - release a forcewake domain reference
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to put references
  *
  * This function drops the device-level forcewakes for specified
  * domains obtained by intel_uncore_forcewake_get().
  */
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
 				enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long irqflags;
 
 	if (!uncore->funcs.force_wake_put)
@@ -758,17 +752,15 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 
 /**
  * intel_uncore_forcewake_put__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * See intel_uncore_forcewake_put(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
  */
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
 					enum forcewake_domains fw_domains)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
 	lockdep_assert_held(&uncore->lock);
 
 	if (!uncore->funcs.force_wake_put)
@@ -1535,11 +1527,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 		 * the access.
 		 */
 		disable_rpm_wakeref_asserts(dev_priv);
-		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 		enable_rpm_wakeref_asserts(dev_priv);
 		break;
 	case MBI_PMIC_BUS_ACCESS_END:
-		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 		break;
 	}
 
@@ -1796,13 +1788,13 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	might_sleep_if(slow_timeout_ms);
 
 	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(dev_priv, fw);
+	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw);
 
 	ret = __intel_wait_for_register_fw(dev_priv,
 					   reg, mask, value,
 					   fast_timeout_us, 0, &reg_value);
 
-	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw);
 	spin_unlock_irq(&dev_priv->uncore.lock);
 
 	if (ret && slow_timeout_ms)
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 73b1bd9a391da..b9010184a7802 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -163,20 +163,20 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
 #define FW_REG_READ  (1)
 #define FW_REG_WRITE (2)
 
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 				enum forcewake_domains domains);
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
 				enum forcewake_domains domains);
 /* Like above but the caller must manage the uncore.lock itself.
  * Must be used with I915_READ_FW and friends.
  */
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
 					enum forcewake_domains domains);
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
 					enum forcewake_domains domains);
 
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
 
 int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 			      i915_reg_t reg,
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 283e9a4ef3ca5..e758bbf506176 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -927,7 +927,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
 	fw = wal_get_fw_for_rmw(dev_priv, wal);
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	intel_uncore_forcewake_get__locked(dev_priv, fw);
+	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw);
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
 		u32 val = I915_READ_FW(wa->reg);
@@ -938,7 +938,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
 		I915_WRITE_FW(wa->reg, val);
 	}
 
-	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 152cad02635dd..6146a7ba762bc 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -199,9 +199,9 @@ static int live_forcewake_ops(void *arg)
 			goto out_rpm;
 		}
 
-		intel_uncore_forcewake_get(i915, fw_domains);
+		intel_uncore_forcewake_get(uncore, fw_domains);
 		val = readl(reg);
-		intel_uncore_forcewake_put(i915, fw_domains);
+		intel_uncore_forcewake_put(uncore, fw_domains);
 
 		/* Flush the forcewake release (delayed onto a timer) */
 		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
@@ -263,7 +263,7 @@ static int live_forcewake_domains(void *arg)
 	if (!valid)
 		return -ENOMEM;
 
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	check_for_unclaimed_mmio(dev_priv);
 	for (offset = 0; offset < FW_RANGE; offset += 4) {
@@ -274,7 +274,7 @@ static int live_forcewake_domains(void *arg)
 			set_bit(offset, valid);
 	}
 
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	err = 0;
 	for_each_set_bit(offset, valid, FW_RANGE) {

From f7de50278e5ccaa2741c0871a41db63d70058a4e Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:37 -0700
Subject: [PATCH 22/95] drm/i915: make more uncore functions work on
 intel_uncore

Move the init, fini, prune, suspend and resume functions to work on
intel_uncore instead of dev_priv.
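
A minimal before/after sketch of the new calling convention, taken from
the i915_drv.c hunk below (i915_drm_suspend_late):

    /* before: entry points took the full device pointer */
    intel_uncore_suspend(dev_priv);

    /* after: they operate directly on the uncore structure */
    intel_uncore_suspend(&dev_priv->uncore);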

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-5-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c               |  20 +-
 drivers/gpu/drm/i915/intel_uncore.c           | 273 +++++++++---------
 drivers/gpu/drm/i915/intel_uncore.h           |  12 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   2 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.c  |   6 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.h  |   2 +-
 6 files changed, 159 insertions(+), 156 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4ec41c3a90388..fd2de1d58b643 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1031,11 +1031,11 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto err_bridge;
 
-	intel_uncore_init(dev_priv);
+	intel_uncore_init(&dev_priv->uncore);
 
 	intel_device_info_init_mmio(dev_priv);
 
-	intel_uncore_prune(dev_priv);
+	intel_uncore_prune(&dev_priv->uncore);
 
 	intel_uc_init_mmio(dev_priv);
 
@@ -1048,7 +1048,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_uncore:
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1062,7 +1062,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
 }
@@ -2124,7 +2124,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	i915_gem_suspend_late(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	intel_power_domains_suspend(dev_priv,
 				    get_suspend_mode(dev_priv, hibernation));
@@ -2320,7 +2320,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
 			  ret);
 
-	intel_uncore_resume_early(dev_priv);
+	intel_uncore_resume_early(&dev_priv->uncore);
+
+	i915_check_and_clear_faults(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
@@ -2890,7 +2892,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	ret = 0;
 	if (INTEL_GEN(dev_priv) >= 11) {
@@ -2907,7 +2909,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-		intel_uncore_runtime_resume(dev_priv);
+		intel_uncore_runtime_resume(&dev_priv->uncore);
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -3004,7 +3006,7 @@ static int intel_runtime_resume(struct device *kdev)
 		ret = vlv_resume_prepare(dev_priv, true);
 	}
 
-	intel_uncore_runtime_resume(dev_priv);
+	intel_uncore_runtime_resume(&dev_priv->uncore);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 2e50697e72b43..d58f96ea774cb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -525,62 +525,58 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 					  unsigned int restore_forcewake)
 {
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(dev_priv))
+	if (check_for_unclaimed_mmio(i915))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(dev_priv)) {
-		__raw_i915_write32(dev_priv, GTFIFOCTL,
-				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
+	if (IS_CHERRYVIEW(i915)) {
+		__raw_i915_write32(i915, GTFIFOCTL,
+				   __raw_i915_read32(i915, GTFIFOCTL) |
 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 				   GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+	intel_uncore_forcewake_reset(uncore);
 	if (restore_forcewake) {
-		spin_lock_irq(&dev_priv->uncore.lock);
-		dev_priv->uncore.funcs.force_wake_get(&dev_priv->uncore,
-						      restore_forcewake);
-
-		if (IS_GEN_RANGE(dev_priv, 6, 7))
-			dev_priv->uncore.fifo_count =
-				fifo_free_entries(dev_priv);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+		if (IS_GEN_RANGE(i915, 6, 7))
+			uncore->fifo_count = fifo_free_entries(i915);
+		spin_unlock_irq(&uncore->lock);
 	}
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
 {
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	dev_priv->uncore.fw_domains_saved =
-		intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
-	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
-	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+	__intel_uncore_early_sanitize(uncore, restore_forcewake);
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	i915_check_and_clear_faults(dev_priv);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -1309,29 +1305,29 @@ __gen6_write(32)
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
-	(i915)->uncore.funcs.mmio_writew = x##_write16; \
-	(i915)->uncore.funcs.mmio_writel = x##_write32; \
+	(uncore)->funcs.mmio_writeb = x##_write8; \
+	(uncore)->funcs.mmio_writew = x##_write16; \
+	(uncore)->funcs.mmio_writel = x##_write32; \
 } while (0)
 
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_readb = x##_read8; \
-	(i915)->uncore.funcs.mmio_readw = x##_read16; \
-	(i915)->uncore.funcs.mmio_readl = x##_read32; \
-	(i915)->uncore.funcs.mmio_readq = x##_read64; \
+	(uncore)->funcs.mmio_readb = x##_read8; \
+	(uncore)->funcs.mmio_readw = x##_read16; \
+	(uncore)->funcs.mmio_readl = x##_read32; \
+	(uncore)->funcs.mmio_readq = x##_read64; \
 } while (0)
 
 
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id,
 			   i915_reg_t reg_set,
 			   i915_reg_t reg_ack)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_uncore_forcewake_domain *d;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
@@ -1344,8 +1340,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
 	d->wake_count = 0;
-	d->reg_set = dev_priv->regs + i915_mmio_reg_offset(reg_set);
-	d->reg_ack = dev_priv->regs + i915_mmio_reg_offset(reg_ack);
+	d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);
+	d->reg_ack = i915->regs + i915_mmio_reg_offset(reg_ack);
 
 	d->id = domain_id;
 
@@ -1370,7 +1366,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	fw_domain_reset(d);
 }
 
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id)
 {
 	struct intel_uncore_forcewake_domain *d;
@@ -1378,74 +1374,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv,
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
 
-	d = &dev_priv->uncore.fw_domain[domain_id];
+	d = &uncore->fw_domain[domain_id];
 
 	WARN_ON(d->wake_count);
 	WARN_ON(hrtimer_cancel(&d->timer));
 	memset(d, 0, sizeof(*d));
 
-	dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+	uncore->fw_domains &= ~BIT(domain_id);
 }
 
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) <= 5 || intel_vgpu_active(i915))
 		return;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		int i;
 
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
 		for (i = 0; i < I915_MAX_VCS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VCS(i)))
+			if (!HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
 		}
 		for (i = 0; i < I915_MAX_VECS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VECS(i)))
+			if (!HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		uncore->funcs.force_wake_get = fw_domains_get;
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
-	} else if (IS_IVYBRIDGE(dev_priv)) {
+	} else if (IS_IVYBRIDGE(i915)) {
 		u32 ecobus;
 
 		/* IVB configs may use multi-threaded forcewake */
@@ -1457,9 +1455,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * (correctly) interpreted by the test below as MT
 		 * forcewake being disabled.
 		 */
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		uncore->funcs.force_wake_put = fw_domains_put;
 
 		/* We need to init first for ECOBUS access and then
 		 * determine later if we want to reinit, in case of MT access is
@@ -1468,41 +1466,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * before the ecobus check.
 		 */
 
-		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-		__raw_posting_read(dev_priv, ECOBUS);
+		__raw_i915_write32(i915, FORCEWAKE, 0);
+		__raw_posting_read(i915, ECOBUS);
 
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
-		spin_lock_irq(&dev_priv->uncore.lock);
-		fw_domains_get_with_thread_status(&dev_priv->uncore, FORCEWAKE_RENDER);
-		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		fw_domains_put(&dev_priv->uncore, FORCEWAKE_RENDER);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+		ecobus = __raw_i915_read32(i915, ECOBUS);
+		fw_domains_put(uncore, FORCEWAKE_RENDER);
+		spin_unlock_irq(&uncore->lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
 			DRM_INFO("when using vblank-synced partial screen updates.\n");
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
-	} else if (IS_GEN(dev_priv, 6)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN(i915, 6)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE, FORCEWAKE_ACK);
 	}
 
 	/* All future platforms are expected to require complex power gating */
-	WARN_ON(dev_priv->uncore.fw_domains == 0);
+	WARN_ON(uncore->fw_domains == 0);
 }
 
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
 { \
-	dev_priv->uncore.fw_domains_table = \
+	(uncore)->fw_domains_table = \
 			(struct intel_forcewake_range *)(d); \
-	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
 }
 
 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1538,55 +1536,56 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+void intel_uncore_init(struct intel_uncore *uncore)
 {
-	i915_check_vgpu(dev_priv);
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
-	intel_uncore_edram_detect(dev_priv);
-	intel_uncore_fw_domains_init(dev_priv);
-	__intel_uncore_early_sanitize(dev_priv, 0);
+	i915_check_vgpu(i915);
 
-	dev_priv->uncore.unclaimed_mmio_check = 1;
-	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
-		i915_pmic_bus_access_notifier;
+	intel_uncore_edram_detect(i915);
+	intel_uncore_fw_domains_init(uncore);
+	__intel_uncore_early_sanitize(uncore, 0);
 
-	if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
-	} else if (IS_GEN(dev_priv, 5)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
-	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
+	uncore->unclaimed_mmio_check = 1;
+	uncore->pmic_bus_access_nb.notifier_call =
+		i915_pmic_bus_access_notifier;
 
-		if (IS_VALLEYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	if (IS_GEN_RANGE(i915, 2, 4) || intel_vgpu_active(i915)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+	} else if (IS_GEN(i915, 5)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+	} else if (IS_GEN_RANGE(i915, 6, 7)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+		if (IS_VALLEYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 		} else {
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN(dev_priv, 8)) {
-		if (IS_CHERRYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN(i915, 8)) {
+		if (IS_CHERRYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 
 		} else {
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 	} else {
-		ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 /*
@@ -1594,44 +1593,46 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
  * the forcewake domains. Prune them, to make sure they only reference existing
  * engines.
  */
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) >= 11) {
-		enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) >= 11) {
+		enum forcewake_domains fw_domains = uncore->fw_domains;
 		enum forcewake_domain_id domain_id;
 		int i;
 
 		for (i = 0; i < I915_MAX_VCS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VCS(i)))
+			if (HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 
 		for (i = 0; i < I915_MAX_VECS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VECS(i)))
+			if (HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 	}
 }
 
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini(struct intel_uncore *uncore)
 {
 	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(uncore_to_i915(uncore));
 
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b9010184a7802..b1b596c814516 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -142,14 +142,14 @@ forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
+void intel_uncore_init(struct intel_uncore *uncore);
+void intel_uncore_prune(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
+void intel_uncore_fini(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 54cfb611c0aa3..60bbf8b4df404 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -182,7 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
 		I915_GTT_PAGE_SIZE_64K |
 		I915_GTT_PAGE_SIZE_2M;
 
-	mock_uncore_init(i915);
+	mock_uncore_init(&i915->uncore);
 	i915_gem_init__mm(i915);
 
 	init_waitqueue_head(&i915->gpu_error.wait_queue);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e382..c3896c1fd5515 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,8 @@ __nop_read(16)
 __nop_read(32)
 __nop_read(64)
 
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
 {
-	ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
-	ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+	ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+	ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d510..dacb36b5ffcdf 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
 #ifndef __MOCK_UNCORE_H
 #define __MOCK_UNCORE_H
 
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
 
 #endif /* !__MOCK_UNCORE_H */

From cb7ee69015aaba5e1091af94e73bc72483c08e37 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:38 -0700
Subject: [PATCH 23/95] drm/i915: make find_fw_domain work on intel_uncore

Remove the unneeded usage of dev_priv from one more function.
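
For reference, the resulting prototype (as in the hunk below); the
fw-domain lookup macros now pass &dev_priv->uncore until they are
converted as well:

    /* before */
    static enum forcewake_domains
    find_fw_domain(struct drm_i915_private *dev_priv, u32 offset);

    /* after */
    static enum forcewake_domains
    find_fw_domain(struct intel_uncore *uncore, u32 offset);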

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-6-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_uncore.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d58f96ea774cb..62ee4bc6b7a6b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -835,13 +835,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
 })
 
 static enum forcewake_domains
-find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+find_fw_domain(struct intel_uncore *uncore, u32 offset)
 {
 	const struct intel_forcewake_range *entry;
 
 	entry = BSEARCH(offset,
-			dev_priv->uncore.fw_domains_table,
-			dev_priv->uncore.fw_domains_table_entries,
+			uncore->fw_domains_table,
+			uncore->fw_domains_table_entries,
 			fw_range_cmp);
 
 	if (!entry)
@@ -853,11 +853,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
 	 * translate it here to the list of available domains.
 	 */
 	if (entry->domains == FORCEWAKE_ALL)
-		return dev_priv->uncore.fw_domains;
+		return uncore->fw_domains;
 
-	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+	WARN(entry->domains & ~uncore->fw_domains,
 	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
-	     entry->domains & ~dev_priv->uncore.fw_domains, offset);
+	     entry->domains & ~uncore->fw_domains, offset);
 
 	return entry->domains;
 }
@@ -885,7 +885,7 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
 	__fwd; \
 })
 
@@ -893,7 +893,7 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
 	__fwd; \
 })
 
@@ -979,7 +979,7 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
 	__fwd; \
 })
 
@@ -987,7 +987,7 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
 	__fwd; \
 })
 

From 272c7e52302e910abd49b3fbc871d9609c72b63d Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:39 -0700
Subject: [PATCH 24/95] drm/i915: reduce the dev_priv->uncore dance in uncore.c

Use a local variable where it makes sense.
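
A sketch of the pattern, abridged from the __gen6_gt_wait_for_fifo hunk
below:

    static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
    {
    	struct intel_uncore *uncore = &dev_priv->uncore;
    	...
    	uncore->fifo_count = n - 1;	/* was dev_priv->uncore.fifo_count */
    }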

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-7-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_uncore.c | 79 ++++++++++++++++-------------
 1 file changed, 43 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 62ee4bc6b7a6b..bb9a10cc6904d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -313,6 +313,7 @@ static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 n;
 
 	/* On VLV, FIFO will be shared by both SW and HW.
@@ -320,7 +321,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 	if (IS_VALLEYVIEW(dev_priv))
 		n = fifo_free_entries(dev_priv);
 	else
-		n = dev_priv->uncore.fifo_count;
+		n = uncore->fifo_count;
 
 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
 		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
@@ -331,7 +332,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	dev_priv->uncore.fifo_count = n - 1;
+	uncore->fifo_count = n - 1;
 }
 
 static enum hrtimer_restart
@@ -795,7 +796,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 #define GEN11_NEEDS_FORCE_WAKE(reg) \
 	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
 
-#define __gen6_reg_read_fw_domains(offset) \
+#define __gen6_reg_read_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd; \
 	if (NEEDS_FORCE_WAKE(offset)) \
@@ -881,19 +882,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
 };
 
-#define __fwtable_reg_read_fw_domains(offset) \
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
 	__fwd; \
 })
 
-#define __gen11_fwtable_reg_read_fw_domains(offset) \
+#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
 	__fwd; \
 })
 
@@ -945,7 +946,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
 __is_genX_shadowed(8)
 __is_genX_shadowed(11)
 
-#define __gen8_reg_write_fw_domains(offset) \
+#define __gen8_reg_write_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd; \
 	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
@@ -975,19 +976,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
 };
 
-#define __fwtable_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
-		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
 	__fwd; \
 })
 
-#define __gen11_fwtable_reg_write_fw_domains(offset) \
+#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd = 0; \
 	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
-		__fwd = find_fw_domain(&dev_priv->uncore, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
 	__fwd; \
 })
 
@@ -1137,16 +1138,17 @@ __gen2_read(64)
 #undef GEN2_READ_HEADER
 
 #define GEN6_READ_HEADER(x) \
+	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	u##x val = 0; \
 	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(dev_priv, reg, true, true)
 
 #define GEN6_READ_FOOTER \
 	unclaimed_reg_debug(dev_priv, reg, true, false); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	spin_unlock_irqrestore(&uncore->lock, irqflags); \
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
 
@@ -1183,9 +1185,9 @@ static u##x \
 func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	fw_engine = __##func##_reg_read_fw_domains(offset); \
+	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
 	if (fw_engine) \
-		__force_wake_auto(&dev_priv->uncore, fw_engine); \
+		__force_wake_auto(uncore, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
@@ -1249,16 +1251,17 @@ __gen2_write(32)
 #undef GEN2_WRITE_HEADER
 
 #define GEN6_WRITE_HEADER \
+	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(dev_priv, reg, false, true)
 
 #define GEN6_WRITE_FOOTER \
 	unclaimed_reg_debug(dev_priv, reg, false, false); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+	spin_unlock_irqrestore(&uncore->lock, irqflags)
 
 #define __gen6_write(x) \
 static void \
@@ -1275,9 +1278,9 @@ static void \
 func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	fw_engine = __##func##_reg_write_fw_domains(offset); \
+	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
 	if (fw_engine) \
-		__force_wake_auto(&dev_priv->uncore, fw_engine); \
+		__force_wake_auto(uncore, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
@@ -1781,6 +1784,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 			    unsigned int slow_timeout_ms,
 			    u32 *out_value)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned fw =
 		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 	u32 reg_value;
@@ -1788,15 +1792,15 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 
 	might_sleep_if(slow_timeout_ms);
 
-	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw);
+	spin_lock_irq(&uncore->lock);
+	intel_uncore_forcewake_get__locked(uncore, fw);
 
 	ret = __intel_wait_for_register_fw(dev_priv,
 					   reg, mask, value,
 					   fast_timeout_us, 0, &reg_value);
 
-	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw);
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_put__locked(uncore, fw);
+	spin_unlock_irq(&uncore->lock);
 
 	if (ret && slow_timeout_ms)
 		ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
@@ -1820,11 +1824,12 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 bool
 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	bool ret = false;
 
-	spin_lock_irq(&dev_priv->uncore.lock);
+	spin_lock_irq(&uncore->lock);
 
-	if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+	if (unlikely(uncore->unclaimed_mmio_check <= 0))
 		goto out;
 
 	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
@@ -1834,12 +1839,12 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 				  "Please use i915.mmio_debug=N for more information.\n");
 			i915_modparams.mmio_debug++;
 		}
-		dev_priv->uncore.unclaimed_mmio_check--;
+		uncore->unclaimed_mmio_check--;
 		ret = true;
 	}
 
 out:
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	spin_unlock_irq(&uncore->lock);
 
 	return ret;
 }
@@ -1848,21 +1853,22 @@ static enum forcewake_domains
 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 				i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
 	if (INTEL_GEN(dev_priv) >= 11) {
-		fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
+		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
 	} else if (HAS_FWTABLE(dev_priv)) {
-		fw_domains = __fwtable_reg_read_fw_domains(offset);
+		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
 	} else if (INTEL_GEN(dev_priv) >= 6) {
-		fw_domains = __gen6_reg_read_fw_domains(offset);
+		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
 	} else {
 		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
 		fw_domains = 0;
 	}
 
-	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+	WARN_ON(fw_domains & ~uncore->fw_domains);
 
 	return fw_domains;
 }
@@ -1871,15 +1877,16 @@ static enum forcewake_domains
 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 				 i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
 	if (INTEL_GEN(dev_priv) >= 11) {
-		fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
+		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
 	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
-		fw_domains = __fwtable_reg_write_fw_domains(offset);
+		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
 	} else if (IS_GEN(dev_priv, 8)) {
-		fw_domains = __gen8_reg_write_fw_domains(offset);
+		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
 	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
 	} else {
@@ -1887,7 +1894,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 		fw_domains = 0;
 	}
 
-	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+	WARN_ON(fw_domains & ~uncore->fw_domains);
 
 	return fw_domains;
 }

From 25286aaca9cee11daa0cede2bcd62d48e440d9f3 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:40 -0700
Subject: [PATCH 25/95] drm/i915: move regs pointer inside the uncore structure

This will allow further simplifications in the uncore handling.

v2: move register access setup under uncore (Chris)
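
For example, fw_domain_init (see the intel_uncore.c hunk below) now
derives the register base from the uncore itself:

    /* before */
    d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);

    /* after */
    d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);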

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-8-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c               | 49 ++---------------
 drivers/gpu/drm/i915/i915_drv.h               |  6 +--
 drivers/gpu/drm/i915/i915_irq.c               | 22 ++++----
 drivers/gpu/drm/i915/intel_lrc.c              |  6 +--
 drivers/gpu/drm/i915/intel_uncore.c           | 54 +++++++++++++++++--
 drivers/gpu/drm/i915/intel_uncore.h           |  4 +-
 drivers/gpu/drm/i915/selftests/intel_uncore.c |  2 +-
 7 files changed, 75 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fd2de1d58b643..d01689737f1e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -968,46 +968,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 	i915_engines_cleanup(dev_priv);
 }
 
-static int i915_mmio_setup(struct drm_i915_private *dev_priv)
-{
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	int mmio_bar;
-	int mmio_size;
-
-	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
-	/*
-	 * Before gen4, the registers and the GTT are behind different BARs.
-	 * However, from gen4 onwards, the registers and the GTT are shared
-	 * in the same BAR, so we want to restrict this ioremap from
-	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-	 * the register BAR remains the same size for all the earlier
-	 * generations up to Ironlake.
-	 */
-	if (INTEL_GEN(dev_priv) < 5)
-		mmio_size = 512 * 1024;
-	else
-		mmio_size = 2 * 1024 * 1024;
-	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
-	if (dev_priv->regs == NULL) {
-		DRM_ERROR("failed to map registers\n");
-
-		return -EIO;
-	}
-
-	/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev_priv);
-
-	return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
-{
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-
-	intel_teardown_mchbar(dev_priv);
-	pci_iounmap(pdev, dev_priv->regs);
-}
-
 /**
  * i915_driver_init_mmio - setup device MMIO
  * @dev_priv: device private
@@ -1027,11 +987,12 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (i915_get_bridge_dev(dev_priv))
 		return -EIO;
 
-	ret = i915_mmio_setup(dev_priv);
+	ret = intel_uncore_init(&dev_priv->uncore);
 	if (ret < 0)
 		goto err_bridge;
 
-	intel_uncore_init(&dev_priv->uncore);
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev_priv);
 
 	intel_device_info_init_mmio(dev_priv);
 
@@ -1048,8 +1009,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_uncore:
+	intel_teardown_mchbar(dev_priv);
 	intel_uncore_fini(&dev_priv->uncore);
-	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 
@@ -1062,8 +1023,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
+	intel_teardown_mchbar(dev_priv);
 	intel_uncore_fini(&dev_priv->uncore);
-	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9ac9183fbffa..54c9d82590a4a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1505,8 +1505,6 @@ struct drm_i915_private {
 	 */
 	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */
 
-	void __iomem *regs;
-
 	struct intel_uncore uncore;
 
 	struct i915_virtual_gpu vgpu;
@@ -3489,14 +3487,14 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
 					     i915_reg_t reg) \
 { \
-	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
+	return read##s(dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
 }
 
 #define __raw_write(x, s) \
 static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
 				       i915_reg_t reg, uint##x##_t val) \
 { \
-	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
+	write##s(val, dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
 }
 __raw_read(8, b)
 __raw_read(16, w)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1375bba455487..2f788291cfe0b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -268,7 +268,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
 				const unsigned int bank,
 				const unsigned int bit)
 {
-	void __iomem * const regs = i915->regs;
+	void __iomem * const regs = i915->uncore.regs;
 	u32 dw;
 
 	lockdep_assert_held(&i915->irq_lock);
@@ -1479,7 +1479,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
 			    u32 master_ctl, u32 gt_iir[4])
 {
-	void __iomem * const regs = i915->regs;
+	void __iomem * const regs = i915->uncore.regs;
 
 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
 		      GEN8_GT_BCS_IRQ | \
@@ -2876,7 +2876,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private *dev_priv = to_i915(arg);
-	void __iomem * const regs = dev_priv->regs;
+	void __iomem * const regs = dev_priv->uncore.regs;
 	u32 master_ctl;
 	u32 gt_iir[4];
 
@@ -2910,7 +2910,7 @@ static u32
 gen11_gt_engine_identity(struct drm_i915_private * const i915,
 			 const unsigned int bank, const unsigned int bit)
 {
-	void __iomem * const regs = i915->regs;
+	void __iomem * const regs = i915->uncore.regs;
 	u32 timeout_ts;
 	u32 ident;
 
@@ -2994,7 +2994,7 @@ static void
 gen11_gt_bank_handler(struct drm_i915_private * const i915,
 		      const unsigned int bank)
 {
-	void __iomem * const regs = i915->regs;
+	void __iomem * const regs = i915->uncore.regs;
 	unsigned long intr_dw;
 	unsigned int bit;
 
@@ -3037,7 +3037,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 static u32
 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
-	void __iomem * const regs = dev_priv->regs;
+	void __iomem * const regs = dev_priv->uncore.regs;
 	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3078,7 +3078,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private * const i915 = to_i915(arg);
-	void __iomem * const regs = i915->regs;
+	void __iomem * const regs = i915->uncore.regs;
 	u32 master_ctl;
 	u32 gu_misc_iir;
 
@@ -3359,7 +3359,7 @@ static void gen8_irq_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;
 
-	gen8_master_intr_disable(dev_priv->regs);
+	gen8_master_intr_disable(dev_priv->uncore.regs);
 
 	gen8_gt_irq_reset(dev_priv);
 
@@ -3401,7 +3401,7 @@ static void gen11_irq_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	gen11_master_intr_disable(dev_priv->regs);
+	gen11_master_intr_disable(dev_priv->uncore.regs);
 
 	gen11_gt_irq_reset(dev_priv);
 
@@ -4006,7 +4006,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_postinstall(dev);
 
-	gen8_master_intr_enable(dev_priv->regs);
+	gen8_master_intr_enable(dev_priv->uncore.regs);
 
 	return 0;
 }
@@ -4068,7 +4068,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
-	gen11_master_intr_enable(dev_priv->regs);
+	gen11_master_intr_enable(dev_priv->uncore.regs);
 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 51c2ea164b36f..82ee3f6695643 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2409,12 +2409,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	intel_engine_init_workarounds(engine);
 
 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
-		execlists->submit_reg = i915->regs +
+		execlists->submit_reg = i915->uncore.regs +
 			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
-		execlists->ctrl_reg = i915->regs +
+		execlists->ctrl_reg = i915->uncore.regs +
 			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
 	} else {
-		execlists->submit_reg = i915->regs +
+		execlists->submit_reg = i915->uncore.regs +
 			i915_mmio_reg_offset(RING_ELSP(engine));
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index bb9a10cc6904d..e60856069971f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1330,7 +1330,6 @@ static void fw_domain_init(struct intel_uncore *uncore,
 			   i915_reg_t reg_ack)
 {
 	struct intel_uncore_forcewake_domain *d;
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
@@ -1343,8 +1342,8 @@ static void fw_domain_init(struct intel_uncore *uncore,
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
 	d->wake_count = 0;
-	d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);
-	d->reg_ack = i915->regs + i915_mmio_reg_offset(reg_ack);
+	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
+	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
 
 	d->id = domain_id;
 
@@ -1539,9 +1538,53 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void intel_uncore_init(struct intel_uncore *uncore)
+static int uncore_mmio_setup(struct intel_uncore *uncore)
 {
 	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct pci_dev *pdev = i915->drm.pdev;
+	int mmio_bar;
+	int mmio_size;
+
+	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+	/*
+	 * Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want to map with ioremap_wc instead.
+	 * Fortunately, the register BAR remains the same size for all the
+	 * earlier generations up to Ironlake.
+	 */
+	if (INTEL_GEN(i915) < 5)
+		mmio_size = 512 * 1024;
+	else
+		mmio_size = 2 * 1024 * 1024;
+	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+	if (uncore->regs == NULL) {
+		DRM_ERROR("failed to map registers\n");
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct pci_dev *pdev = i915->drm.pdev;
+
+	pci_iounmap(pdev, uncore->regs);
+}
+
+int intel_uncore_init(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	int ret;
+
+	ret = uncore_mmio_setup(uncore);
+	if (ret)
+		return ret;
 
 	i915_check_vgpu(i915);
 
@@ -1589,6 +1632,8 @@ void intel_uncore_init(struct intel_uncore *uncore)
 	}
 
 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+	return 0;
 }
 
 /*
@@ -1637,6 +1682,7 @@ void intel_uncore_fini(struct intel_uncore *uncore)
 		&uncore->pmic_bus_access_nb);
 	intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
+	uncore_mmio_cleanup(uncore);
 }
 
 static const struct reg_whitelist {
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b1b596c814516..d345e5ab04a54 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -93,6 +93,8 @@ struct intel_forcewake_range {
 };
 
 struct intel_uncore {
+	void __iomem *regs;
+
 	spinlock_t lock; /** lock is also taken in irq contexts. */
 
 	const struct intel_forcewake_range *fw_domains_table;
@@ -142,7 +144,7 @@ forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct intel_uncore *uncore);
+int intel_uncore_init(struct intel_uncore *uncore);
 void intel_uncore_prune(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 6146a7ba762bc..c69a1bad89335 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -177,7 +177,7 @@ static int live_forcewake_ops(void *arg)
 
 	for_each_engine(engine, i915, id) {
 		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
-		u32 __iomem *reg = i915->regs + engine->mmio_base + r->offset;
+		u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
 		enum forcewake_domains fw_domains;
 		u32 val;
 

From 6ebc9692a7add632eb4d8ec3dcd1530bc4bbff08 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 19 Mar 2019 11:35:41 -0700
Subject: [PATCH 26/95] drm/i915: make raw access function work on uncore

This allows us to ditch i915 in some more places.

v2: use local var in check_vgpu (Paulo)
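
For illustration, the shape of the conversion at a typical call site
(taken from the hunks below):

  	/* before: the raw accessors needed the whole drm_i915_private */
  	val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);

  	/* after: only the intel_uncore (which owns the regs mapping) is needed */
  	val = __raw_i915_read32(&dev_priv->uncore, GEN6_GT_THREAD_STATUS_REG);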

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-9-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h     | 14 ++---
 drivers/gpu/drm/i915/i915_vgpu.c    |  7 ++-
 drivers/gpu/drm/i915/intel_uncore.c | 90 +++++++++++++++--------------
 3 files changed, 57 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54c9d82590a4a..b35d2cb260dad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3484,17 +3484,17 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
 #define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
+static inline uint##x##_t __raw_i915_read##x(const struct intel_uncore *uncore, \
 					     i915_reg_t reg) \
 { \
-	return read##s(dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
+	return read##s(uncore->regs + i915_mmio_reg_offset(reg)); \
 }
 
 #define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
+static inline void __raw_i915_write##x(const struct intel_uncore *uncore, \
 				       i915_reg_t reg, uint##x##_t val) \
 { \
-	write##s(val, dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
+	write##s(val, uncore->regs + i915_mmio_reg_offset(reg)); \
 }
 __raw_read(8, b)
 __raw_read(16, w)
@@ -3535,9 +3535,9 @@ __raw_write(64, q)
  * therefore generally be serialised, by either the dev_priv->uncore.lock or
  * a more localised lock guarding all access to that bank of registers.
  */
-#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
+#define I915_READ_FW(reg__) __raw_i915_read32(&dev_priv->uncore, (reg__))
+#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(&dev_priv->uncore, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(&dev_priv->uncore, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 3b2d83f704e30..3d0b493e42003 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,22 +60,23 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u64 magic;
 	u16 version_major;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
+	magic = __raw_i915_read64(uncore, vgtif_reg(magic));
 	if (magic != VGT_MAGIC)
 		return;
 
-	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	version_major = __raw_i915_read16(uncore, vgtif_reg(version_major));
 	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
 
-	dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+	dev_priv->vgpu.caps = __raw_i915_read32(uncore, vgtif_reg(vgt_caps));
 
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e60856069971f..1816eeae3ba92 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -31,7 +31,7 @@
 #define FORCEWAKE_ACK_TIMEOUT_MS 50
 #define GT_FIFO_TIMEOUT_MS	 10
 
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
+#define __raw_posting_read(uncore__, reg__) (void)__raw_i915_read32((uncore__), (reg__))
 
 static const char * const forcewake_domain_names[] = {
 	"render",
@@ -275,23 +275,23 @@ fw_domains_reset(struct intel_uncore *uncore,
 		fw_domain_reset(d);
 }
 
-static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+static inline u32 gt_thread_status(struct intel_uncore *uncore)
 {
 	u32 val;
 
-	val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+	val = __raw_i915_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
 
 	return val;
 }
 
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
 {
 	/*
 	 * w/a for a sporadic read returning 0 by waiting for the GT
 	 * thread to wake up.
 	 */
-	WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
 		  "GT thread status wait timed out\n");
 }
 
@@ -301,30 +301,29 @@ static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
 	fw_domains_get(uncore, fw_domains);
 
 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
-	__gen6_gt_wait_for_thread_c0(uncore_to_i915(uncore));
+	__gen6_gt_wait_for_thread_c0(uncore);
 }
 
-static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+static inline u32 fifo_free_entries(struct intel_uncore *uncore)
 {
-	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+	u32 count = __raw_i915_read32(uncore, GTFIFOCTL);
 
 	return count & GT_FIFO_FREE_ENTRIES_MASK;
 }
 
-static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 n;
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
-	if (IS_VALLEYVIEW(dev_priv))
-		n = fifo_free_entries(dev_priv);
+	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+		n = fifo_free_entries(uncore);
 	else
 		n = uncore->fifo_count;
 
 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
-		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
 				    GT_FIFO_NUM_RESERVED_ENTRIES,
 				    GT_FIFO_TIMEOUT_MS)) {
 			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
@@ -452,7 +451,7 @@ static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
 	if (IS_HASWELL(dev_priv) ||
 	    IS_BROADWELL(dev_priv) ||
 	    INTEL_GEN(dev_priv) >= 9) {
-		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
+		dev_priv->edram_cap = __raw_i915_read32(&dev_priv->uncore,
 							HSW_EDRAM_CAP);
 
 		/* NB: We can't write IDICR yet because we do not have gt funcs
@@ -467,43 +466,43 @@ static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
 }
 
 static bool
-fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 dbg;
 
-	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+	dbg = __raw_i915_read32(uncore, FPGA_DBG);
 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
 		return false;
 
-	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	__raw_i915_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
 	return true;
 }
 
 static bool
-vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 cer;
 
-	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+	cer = __raw_i915_read32(uncore, CLAIM_ER);
 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
 		return false;
 
-	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+	__raw_i915_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
 
 	return true;
 }
 
 static bool
-gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+gen6_check_for_fifo_debug(struct intel_uncore *uncore)
 {
 	u32 fifodbg;
 
-	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+	fifodbg = __raw_i915_read32(uncore, GTFIFODBG);
 
 	if (unlikely(fifodbg)) {
 		DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
-		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+		__raw_i915_write32(uncore, GTFIFODBG, fifodbg);
 	}
 
 	return fifodbg;
@@ -512,16 +511,17 @@ gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
 static bool
 check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	bool ret = false;
 
 	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
-		ret |= fpga_check_for_unclaimed_mmio(dev_priv);
+		ret |= fpga_check_for_unclaimed_mmio(uncore);
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret |= vlv_check_for_unclaimed_mmio(dev_priv);
+		ret |= vlv_check_for_unclaimed_mmio(uncore);
 
 	if (IS_GEN_RANGE(dev_priv, 6, 7))
-		ret |= gen6_check_for_fifo_debug(dev_priv);
+		ret |= gen6_check_for_fifo_debug(uncore);
 
 	return ret;
 }
@@ -537,8 +537,8 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 
 	/* WaDisableShadowRegForCpd:chv */
 	if (IS_CHERRYVIEW(i915)) {
-		__raw_i915_write32(i915, GTFIFOCTL,
-				   __raw_i915_read32(i915, GTFIFOCTL) |
+		__raw_i915_write32(uncore, GTFIFOCTL,
+				   __raw_i915_read32(uncore, GTFIFOCTL) |
 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 				   GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
@@ -550,7 +550,7 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 		uncore->funcs.force_wake_get(uncore, restore_forcewake);
 
 		if (IS_GEN_RANGE(i915, 6, 7))
-			uncore->fifo_count = fifo_free_entries(i915);
+			uncore->fifo_count = fifo_free_entries(uncore);
 		spin_unlock_irq(&uncore->lock);
 	}
 	iosf_mbi_punit_release();
@@ -1063,12 +1063,12 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
 };
 
 static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
+ilk_dummy_write(struct intel_uncore *uncore)
 {
 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
 	 * hence harmless to write 0 into. */
-	__raw_i915_write32(dev_priv, MI_MODE, 0);
+	__raw_i915_write32(uncore, MI_MODE, 0);
 }
 
 static void
@@ -1098,6 +1098,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 }
 
 #define GEN2_READ_HEADER(x) \
+	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u##x val = 0; \
 	assert_rpm_wakelock_held(dev_priv);
 
@@ -1109,7 +1110,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 static u##x \
 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+	val = __raw_i915_read##x(uncore, reg); \
 	GEN2_READ_FOOTER; \
 }
 
@@ -1117,8 +1118,8 @@ gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 static u##x \
 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
-	ilk_dummy_write(dev_priv); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+	ilk_dummy_write(uncore); \
+	val = __raw_i915_read##x(uncore, reg); \
 	GEN2_READ_FOOTER; \
 }
 
@@ -1188,7 +1189,7 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
 	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
 	if (fw_engine) \
 		__force_wake_auto(uncore, fw_engine); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+	val = __raw_i915_read##x(uncore, reg); \
 	GEN6_READ_FOOTER; \
 }
 #define __gen6_read(x) __gen_read(gen6, x)
@@ -1215,6 +1216,7 @@ __gen6_read(64)
 #undef GEN6_READ_HEADER
 
 #define GEN2_WRITE_HEADER \
+	struct intel_uncore *uncore = &dev_priv->uncore; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_rpm_wakelock_held(dev_priv); \
 
@@ -1224,7 +1226,7 @@ __gen6_read(64)
 static void \
 gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
-	__raw_i915_write##x(dev_priv, reg, val); \
+	__raw_i915_write##x(uncore, reg, val); \
 	GEN2_WRITE_FOOTER; \
 }
 
@@ -1232,8 +1234,8 @@ gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 static void \
 gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
-	ilk_dummy_write(dev_priv); \
-	__raw_i915_write##x(dev_priv, reg, val); \
+	ilk_dummy_write(uncore); \
+	__raw_i915_write##x(uncore, reg, val); \
 	GEN2_WRITE_FOOTER; \
 }
 
@@ -1268,8 +1270,8 @@ static void \
 gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN6_WRITE_HEADER; \
 	if (NEEDS_FORCE_WAKE(offset)) \
-		__gen6_gt_wait_for_fifo(dev_priv); \
-	__raw_i915_write##x(dev_priv, reg, val); \
+		__gen6_gt_wait_for_fifo(uncore); \
+	__raw_i915_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 
@@ -1281,7 +1283,7 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
 	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
 	if (fw_engine) \
 		__force_wake_auto(uncore, fw_engine); \
-	__raw_i915_write##x(dev_priv, reg, val); \
+	__raw_i915_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 #define __gen8_write(x) __gen_write(gen8, x)
@@ -1468,15 +1470,15 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		 * before the ecobus check.
 		 */
 
-		__raw_i915_write32(i915, FORCEWAKE, 0);
-		__raw_posting_read(i915, ECOBUS);
+		__raw_i915_write32(uncore, FORCEWAKE, 0);
+		__raw_posting_read(uncore, ECOBUS);
 
 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
 		spin_lock_irq(&uncore->lock);
 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
-		ecobus = __raw_i915_read32(i915, ECOBUS);
+		ecobus = __raw_i915_read32(uncore, ECOBUS);
 		fw_domains_put(uncore, FORCEWAKE_RENDER);
 		spin_unlock_irq(&uncore->lock);
 

From ab7529f2441791cdfe6c8e3202f35f3e811985a6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 20 Mar 2019 15:40:21 +0000
Subject: [PATCH 27/95] drm/i915: Use __is_constexpr()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

gcc-4.8 and older dislike the use of __builtin_constant_p() within a
constant expression context, and so we must use the magical
__is_constexpr() instead.

For example, with gcc-4.8.5:
../drivers/gpu/drm/i915/i915_reg.h:167:27: error: first argument to ‘__builtin_choose_expr’ not a constant
../include/linux/build_bug.h:16:45: error: bit-field ‘<anonymous>’ width not an integer constant
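
For context, a stand-alone sketch of the two helpers involved
(paraphrased from the kernel's __is_constexpr() and BUILD_BUG_ON_ZERO();
SAFE_BIT() below is a hypothetical user, not a driver macro). The point
is that __is_constexpr() is itself an integer constant expression, so it
can sit inside a bit-field width where gcc-4.8 rejects
__builtin_constant_p():

  /* evaluates to 1 iff x is an integer constant expression */
  #define __is_constexpr(x) \
  	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

  /* evaluates to 0, or breaks the build via a negative bit-field width */
  #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int : (-!!(e)); })))

  /* hypothetical example user, mirroring the REG_FIELD_PREP() pattern */
  #define SAFE_BIT(n) \
  	((unsigned int)(BUILD_BUG_ON_ZERO(!__is_constexpr(n)) + (1u << (n))))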

Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Uma Shankar <uma.shankar@intel.com>
Fixes: baa09e7d2f42 ("drm/i915: use REG_FIELD_PREP() to define register bitfield values")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190320154021.5244-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_reg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7a9d867eb49b8..b46910453e612 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -161,10 +161,10 @@
  */
 #define REG_FIELD_PREP(__mask, __val)						\
 	((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
-	       BUILD_BUG_ON_ZERO(!__builtin_constant_p(__mask)) +		\
+	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
 	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) +		\
 	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
-	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
 
 /**
  * REG_FIELD_GET() - Extract a u32 bitfield value

From 401f147b16d90ae378ec8218679e6fdba96e13cf Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Thu, 21 Mar 2019 12:24:51 +0300
Subject: [PATCH 28/95] drm/i915/selftests: fix NULL vs IS_ERR() check in
 mock_context_barrier()

The mock_context() function returns NULL on error; it doesn't return
error pointers.
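
For reference, the two error-return conventions being mixed up here, as
a generic sketch (alloc_thing() and create_thing() are hypothetical):

  	struct thing *t;

  	t = alloc_thing();		/* NULL-on-failure style */
  	if (!t)
  		return -ENOMEM;		/* caller chooses the errno */

  	t = create_thing();		/* ERR_PTR-on-failure style */
  	if (IS_ERR(t))
  		return PTR_ERR(t);	/* errno is encoded in the pointer */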

Fixes: 85fddf0b0027 ("drm/i915: Introduce a context barrier callback")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321092451.GK2202@kadam
---
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 4399ef9ebf158..a172dbd9cb9e4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1620,8 +1620,8 @@ static int mock_context_barrier(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 
 	ctx = mock_context(i915, "mock");
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
+	if (!ctx) {
+		err = -ENOMEM;
 		goto unlock;
 	}
 

From 3aa9945a528e7616b5c8fe5d7aa7d4aaf52b0af2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 14:07:08 +0000
Subject: [PATCH 29/95] drm/i915: Separate GEM context construction and
 registration to userspace

In later patches, it became apparent that userspace could see a partially
constructed GEM context and begin using it before it was ready, to much
hilarity. Close this window of opportunity by lifting the registration of
the context with userspace (the insertion of the context into the filp's
idr) to the very end of the CONTEXT_CREATE ioctl.
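
Condensed from the new i915_gem_context_create_ioctl() flow below
(locking and error unwinding elided), the ordering becomes:

  	ctx = i915_gem_create_context(i915);	/* fully construct first */
  	if (IS_ERR(ctx))
  		return PTR_ERR(ctx);

  	ret = gem_context_register(ctx, file_priv);
  	if (ret)
  		goto err_ctx;		/* close it before anyone could see it */

  	args->ctx_id = ctx->user_handle;	/* only now visible to userspace */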

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321140711.11190-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c       | 138 ++++++++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   7 +-
 drivers/gpu/drm/i915/i915_gem_gtt.h           |   8 +-
 drivers/gpu/drm/i915/selftests/huge_pages.c   |   2 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c |  12 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   2 +-
 drivers/gpu/drm/i915/selftests/mock_context.c |  17 ++-
 7 files changed, 111 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d776d43707e0b..c7cbc66945f08 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,15 +337,13 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 }
 
 static struct i915_gem_context *
-__create_hw_context(struct drm_i915_private *dev_priv,
-		    struct drm_i915_file_private *file_priv)
+__create_context(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
-	int ret;
 	int i;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (ctx == NULL)
+	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
@@ -362,29 +360,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	INIT_LIST_HEAD(&ctx->handles_list);
 	INIT_LIST_HEAD(&ctx->hw_id_link);
 
-	/* Default context will never have a file_priv */
-	ret = DEFAULT_CONTEXT_HANDLE;
-	if (file_priv) {
-		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
-		if (ret < 0)
-			goto err_lut;
-	}
-	ctx->user_handle = ret;
-
-	ctx->file_priv = file_priv;
-	if (file_priv) {
-		ctx->pid = get_task_pid(current, PIDTYPE_PID);
-		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
-				      current->comm,
-				      pid_nr(ctx->pid),
-				      ctx->user_handle);
-		if (!ctx->name) {
-			ret = -ENOMEM;
-			goto err_pid;
-		}
-	}
-
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
@@ -401,25 +376,10 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
 	return ctx;
-
-err_pid:
-	put_pid(ctx->pid);
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-err_lut:
-	context_close(ctx);
-	return ERR_PTR(ret);
-}
-
-static void __destroy_hw_context(struct i915_gem_context *ctx,
-				 struct drm_i915_file_private *file_priv)
-{
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	context_close(ctx);
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv,
-			struct drm_i915_file_private *file_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
 
@@ -428,18 +388,18 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 	/* Reap the most stale context */
 	contexts_free_first(dev_priv);
 
-	ctx = __create_hw_context(dev_priv, file_priv);
+	ctx = __create_context(dev_priv);
 	if (IS_ERR(ctx))
 		return ctx;
 
 	if (HAS_FULL_PPGTT(dev_priv)) {
 		struct i915_hw_ppgtt *ppgtt;
 
-		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
+		ppgtt = i915_ppgtt_create(dev_priv);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
-			__destroy_hw_context(ctx, file_priv);
+			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
 
@@ -475,7 +435,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (ret)
 		return ERR_PTR(ret);
 
-	ctx = i915_gem_create_context(to_i915(dev), NULL);
+	ctx = i915_gem_create_context(to_i915(dev));
 	if (IS_ERR(ctx))
 		goto out;
 
@@ -511,7 +471,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	struct i915_gem_context *ctx;
 	int err;
 
-	ctx = i915_gem_create_context(i915, NULL);
+	ctx = i915_gem_create_context(i915);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -625,25 +585,74 @@ static int context_idr_cleanup(int id, void *p, void *data)
 	return 0;
 }
 
+static int gem_context_register(struct i915_gem_context *ctx,
+				struct drm_i915_file_private *fpriv)
+{
+	int ret;
+
+	ctx->file_priv = fpriv;
+	if (ctx->ppgtt)
+		ctx->ppgtt->vm.file = fpriv;
+
+	ctx->pid = get_task_pid(current, PIDTYPE_PID);
+	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
+			      current->comm, pid_nr(ctx->pid));
+	if (!ctx->name) {
+		ret = -ENOMEM;
+		goto err_pid;
+	}
+
+	/* And finally expose ourselves to userspace via the idr */
+	ret = idr_alloc(&fpriv->context_idr, ctx,
+			DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto err_name;
+
+	ctx->user_handle = ret;
+
+	return 0;
+
+err_name:
+	kfree(fetch_and_zero(&ctx->name));
+err_pid:
+	put_pid(fetch_and_zero(&ctx->pid));
+	return ret;
+}
+
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
+	int err;
 
 	idr_init(&file_priv->context_idr);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ctx = i915_gem_create_context(i915, file_priv);
-	mutex_unlock(&i915->drm.struct_mutex);
+
+	ctx = i915_gem_create_context(i915);
 	if (IS_ERR(ctx)) {
-		idr_destroy(&file_priv->context_idr);
-		return PTR_ERR(ctx);
+		err = PTR_ERR(ctx);
+		goto err;
 	}
 
+	err = gem_context_register(ctx, file_priv);
+	if (err)
+		goto err_ctx;
+
+	GEM_BUG_ON(ctx->user_handle != DEFAULT_CONTEXT_HANDLE);
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	return 0;
+
+err_ctx:
+	context_close(ctx);
+err:
+	mutex_unlock(&i915->drm.struct_mutex);
+	idr_destroy(&file_priv->context_idr);
+	return PTR_ERR(ctx);
 }
 
 void i915_gem_context_close(struct drm_file *file)
@@ -835,17 +844,28 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(i915, file_priv);
-	mutex_unlock(&dev->struct_mutex);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ctx = i915_gem_create_context(i915);
+	if (IS_ERR(ctx)) {
+		ret = PTR_ERR(ctx);
+		goto err_unlock;
+	}
 
-	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+	ret = gem_context_register(ctx, file_priv);
+	if (ret)
+		goto err_ctx;
+
+	mutex_unlock(&dev->struct_mutex);
 
 	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG("HW context %d created\n", args->ctx_id);
 
 	return 0;
+
+err_ctx:
+	context_close(ctx);
+err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -870,7 +890,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	__destroy_hw_context(ctx, file_priv);
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
+	context_close(ctx);
+
 	mutex_unlock(&dev->struct_mutex);
 
 out:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b8055c8d4e719..b9e0e3a002239 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2069,8 +2069,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
 }
 
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915,
-		  struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *i915)
 {
 	struct i915_hw_ppgtt *ppgtt;
 
@@ -2078,8 +2077,6 @@ i915_ppgtt_create(struct drm_i915_private *i915,
 	if (IS_ERR(ppgtt))
 		return ppgtt;
 
-	ppgtt->vm.file = fpriv;
-
 	trace_i915_ppgtt_create(&ppgtt->vm);
 
 	return ppgtt;
@@ -2657,7 +2654,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
 	struct i915_hw_ppgtt *ppgtt;
 	int err;
 
-	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
+	ppgtt = i915_ppgtt_create(i915);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 35f21a2ae36c6..b76ab4c2a0e6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -603,15 +603,17 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
-void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
-					struct drm_i915_file_private *fpriv);
+
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
 void i915_ppgtt_close(struct i915_address_space *vm);
+void i915_ppgtt_release(struct kref *kref);
+
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
 	if (ppgtt)
 		kref_get(&ppgtt->ref);
 }
+
 static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
 {
 	if (ppgtt)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 2e1db30af477b..1f419fb094f40 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1713,7 +1713,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
+	ppgtt = i915_ppgtt_create(dev_priv);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index a172dbd9cb9e4..ab2aa32fd0196 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
 	}
 
 	for (n = 0; n < nctx; n++) {
-		ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+		ctx[n] = live_context(i915, file);
 		if (IS_ERR(ctx[n])) {
 			err = PTR_ERR(ctx[n]);
 			goto out_unlock;
@@ -513,7 +513,7 @@ static int igt_ctx_exec(void *arg)
 		struct i915_gem_context *ctx;
 		unsigned int id;
 
-		ctx = i915_gem_create_context(i915, file->driver_priv);
+		ctx = live_context(i915, file);
 		if (IS_ERR(ctx)) {
 			err = PTR_ERR(ctx);
 			goto out_unlock;
@@ -962,7 +962,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 
 	mutex_lock(&i915->drm.struct_mutex);
 
-	ctx = i915_gem_create_context(i915, file->driver_priv);
+	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		ret = PTR_ERR(ctx);
 		goto out_unlock;
@@ -1072,7 +1072,7 @@ static int igt_ctx_readonly(void *arg)
 	if (err)
 		goto out_unlock;
 
-	ctx = i915_gem_create_context(i915, file->driver_priv);
+	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto out_unlock;
@@ -1397,13 +1397,13 @@ static int igt_vm_isolation(void *arg)
 	if (err)
 		goto out_unlock;
 
-	ctx_a = i915_gem_create_context(i915, file->driver_priv);
+	ctx_a = live_context(i915, file);
 	if (IS_ERR(ctx_a)) {
 		err = PTR_ERR(ctx_a);
 		goto out_unlock;
 	}
 
-	ctx_b = i915_gem_create_context(i915, file->driver_priv);
+	ctx_b = live_context(i915, file);
 	if (IS_ERR(ctx_b)) {
 		err = PTR_ERR(ctx_b);
 		goto out_unlock;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 826fd51c331ee..01084f6b4fb78 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 		return PTR_ERR(file);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+	ppgtt = i915_ppgtt_create(dev_priv);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 8efa6892c6cd9..ad3875b01722d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -88,9 +88,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
 struct i915_gem_context *
 live_context(struct drm_i915_private *i915, struct drm_file *file)
 {
+	struct i915_gem_context *ctx;
+	int err;
+
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	return i915_gem_create_context(i915, file->driver_priv);
+	ctx = i915_gem_create_context(i915);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	err = gem_context_register(ctx, file->driver_priv);
+	if (err)
+		goto err_ctx;
+
+	return ctx;
+
+err_ctx:
+	context_close(ctx);
+	return ERR_PTR(err);
 }
 
 struct i915_gem_context *

From 7dc40713618c884bf07c030d1ab1f47a9dc1f310 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 14:07:09 +0000
Subject: [PATCH 30/95] drm/i915: Introduce a mutex for file_priv->context_idr

Define a mutex for exclusive use when interacting with the per-file
context-idr, which was previously guarded by struct_mutex. This allows us
to reduce the coverage of struct_mutex, with a view to removing the last
bits coordinating GEM context later. (In the short term, we avoid taking
struct_mutex while using the extended constructor functions, preventing
some nasty recursion.)

v2: s/context_lock/context_idr_lock/
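
The resulting locking shape, condensed from the hunks below: the idr is
only ever touched under the new per-file mutex, so neither registration
nor the destroy ioctl needs struct_mutex for the idr manipulation itself.

  	/* gem_context_register() */
  	mutex_lock(&fpriv->context_idr_lock);
  	ret = idr_alloc(&fpriv->context_idr, ctx,
  			DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
  	if (ret >= 0)
  		ctx->user_handle = ret;
  	mutex_unlock(&fpriv->context_idr_lock);

  	/* i915_gem_context_destroy_ioctl() */
  	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
  		return -EINTR;
  	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
  	mutex_unlock(&file_priv->context_idr_lock);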

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321140711.11190-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h         |  2 ++
 drivers/gpu/drm/i915/i915_gem_context.c | 47 +++++++++++--------------
 2 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b35d2cb260dad..b6d674aa2786a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -216,7 +216,9 @@ struct drm_i915_file_private {
  */
 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
 	} mm;
+
 	struct idr context_idr;
+	struct mutex context_idr_lock; /* guards context_idr */
 
 	unsigned int bsd_engine;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c7cbc66945f08..9c6987ee27e5d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -579,9 +579,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-	struct i915_gem_context *ctx = p;
-
-	context_close(ctx);
+	context_close(p);
 	return 0;
 }
 
@@ -603,13 +601,15 @@ static int gem_context_register(struct i915_gem_context *ctx,
 	}
 
 	/* And finally expose ourselves to userspace via the idr */
+	mutex_lock(&fpriv->context_idr_lock);
 	ret = idr_alloc(&fpriv->context_idr, ctx,
 			DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
+	if (ret >= 0)
+		ctx->user_handle = ret;
+	mutex_unlock(&fpriv->context_idr_lock);
 	if (ret < 0)
 		goto err_name;
 
-	ctx->user_handle = ret;
-
 	return 0;
 
 err_name:
@@ -627,10 +627,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	int err;
 
 	idr_init(&file_priv->context_idr);
+	mutex_init(&file_priv->context_idr_lock);
 
 	mutex_lock(&i915->drm.struct_mutex);
-
 	ctx = i915_gem_create_context(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto err;
@@ -643,14 +644,14 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	GEM_BUG_ON(ctx->user_handle != DEFAULT_CONTEXT_HANDLE);
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	return 0;
 
 err_ctx:
+	mutex_lock(&i915->drm.struct_mutex);
 	context_close(ctx);
-err:
 	mutex_unlock(&i915->drm.struct_mutex);
+err:
+	mutex_destroy(&file_priv->context_idr_lock);
 	idr_destroy(&file_priv->context_idr);
 	return PTR_ERR(ctx);
 }
@@ -663,6 +664,7 @@ void i915_gem_context_close(struct drm_file *file)
 
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
+	mutex_destroy(&file_priv->context_idr_lock);
 }
 
 static struct i915_request *
@@ -845,25 +847,22 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	ctx = i915_gem_create_context(i915);
-	if (IS_ERR(ctx)) {
-		ret = PTR_ERR(ctx);
-		goto err_unlock;
-	}
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
 
 	ret = gem_context_register(ctx, file_priv);
 	if (ret)
 		goto err_ctx;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG("HW context %d created\n", args->ctx_id);
 
 	return 0;
 
 err_ctx:
+	mutex_lock(&dev->struct_mutex);
 	context_close(ctx);
-err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
@@ -874,7 +873,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_context_destroy *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
-	int ret;
 
 	if (args->pad != 0)
 		return -EINVAL;
@@ -882,21 +880,18 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+		return -EINTR;
+
+	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+	mutex_unlock(&file_priv->context_idr_lock);
 	if (!ctx)
 		return -ENOENT;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out;
-
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
+	mutex_lock(&dev->struct_mutex);
 	context_close(ctx);
-
 	mutex_unlock(&dev->struct_mutex);
 
-out:
-	i915_gem_context_put(ctx);
 	return 0;
 }
 

From 3e05531243d032bbff157999c99a30a969966927 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 14:07:10 +0000
Subject: [PATCH 31/95] drm/i915: Stop storing ctx->user_handle

The user_handle need only be known by userspace for it to look up the
context via the idr; internally we have no use for it.
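
Condensed from the hunks below: gem_context_register() now just returns
the idr slot, and the create ioctl reports that slot directly, so
nothing needs to be cached in the context.

  	mutex_lock(&fpriv->context_idr_lock);
  	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
  	mutex_unlock(&fpriv->context_idr_lock);
  	if (ret < 0)
  		return ret;

  	args->ctx_id = ret;	/* the idr slot is the userspace handle */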

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321140711.11190-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c           |  5 ++--
 drivers/gpu/drm/i915/i915_gem_context.c       | 23 ++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.h       |  5 ----
 drivers/gpu/drm/i915/i915_gem_context_types.h |  9 --------
 drivers/gpu/drm/i915/i915_gpu_error.c         | 11 ++++-----
 drivers/gpu/drm/i915/i915_gpu_error.h         |  1 -
 drivers/gpu/drm/i915/selftests/mock_context.c |  2 +-
 7 files changed, 16 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0dd8b3fa7fb98..47bf07a59b5e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -409,9 +409,8 @@ static void print_context_stats(struct seq_file *m,
 
 			rcu_read_lock();
 			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
-			snprintf(name, sizeof(name), "%s/%d",
-				 task ? task->comm : "<unknown>",
-				 ctx->user_handle);
+			snprintf(name, sizeof(name), "%s",
+				 task ? task->comm : "<unknown>");
 			rcu_read_unlock();
 
 			print_file_stats(m, name, stats);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9c6987ee27e5d..9187910391d8f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -602,20 +602,15 @@ static int gem_context_register(struct i915_gem_context *ctx,
 
 	/* And finally expose ourselves to userspace via the idr */
 	mutex_lock(&fpriv->context_idr_lock);
-	ret = idr_alloc(&fpriv->context_idr, ctx,
-			DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
-	if (ret >= 0)
-		ctx->user_handle = ret;
+	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
 	mutex_unlock(&fpriv->context_idr_lock);
-	if (ret < 0)
-		goto err_name;
-
-	return 0;
+	if (ret >= 0)
+		goto out;
 
-err_name:
 	kfree(fetch_and_zero(&ctx->name));
 err_pid:
 	put_pid(fetch_and_zero(&ctx->pid));
+out:
 	return ret;
 }
 
@@ -638,11 +633,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	}
 
 	err = gem_context_register(ctx, file_priv);
-	if (err)
+	if (err < 0)
 		goto err_ctx;
 
-	GEM_BUG_ON(ctx->user_handle != DEFAULT_CONTEXT_HANDLE);
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+	GEM_BUG_ON(err > 0);
 
 	return 0;
 
@@ -852,10 +847,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(ctx);
 
 	ret = gem_context_register(ctx, file_priv);
-	if (ret)
+	if (ret < 0)
 		goto err_ctx;
 
-	args->ctx_id = ctx->user_handle;
+	args->ctx_id = ret;
 	DRM_DEBUG("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -877,7 +872,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
+	if (!args->ctx_id)
 		return -ENOENT;
 
 	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 5a32c4b4816f8..7e79d1ffbaef1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -126,11 +126,6 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
 	atomic_dec(&ctx->hw_id_pin_count);
 }
 
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
-	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 {
 	return !ctx->file_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
index 2bf19730eaa97..63ae8eb219393 100644
--- a/drivers/gpu/drm/i915/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -129,15 +129,6 @@ struct i915_gem_context {
 	struct list_head active_engines;
 	struct mutex mutex;
 
-	/**
-	 * @user_handle: userspace identifier
-	 *
-	 * A unique per-file identifier is generated from
-	 * &drm_i915_file_private.contexts.
-	 */
-	u32 user_handle;
-#define DEFAULT_CONTEXT_HANDLE 0
-
 	struct i915_sched_attr sched;
 
 	/** hw_contexts: per-engine logical HW state */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 26bac517e3836..a9557f92756f6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -454,8 +454,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct drm_i915_error_context *ctx)
 {
-	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, guilty %d active %d\n",
-		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
+	err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
+		   header, ctx->comm, ctx->pid, ctx->hw_id,
 		   ctx->sched_attr.priority, ctx->guilty, ctx->active);
 }
 
@@ -758,11 +758,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 		if (obj) {
 			err_puts(m, m->i915->engine[i]->name);
 			if (ee->context.pid)
-				err_printf(m, " (submitted by %s [%d], ctx %d [%d])",
+				err_printf(m, " (submitted by %s [%d])",
 					   ee->context.comm,
-					   ee->context.pid,
-					   ee->context.handle,
-					   ee->context.hw_id);
+					   ee->context.pid);
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
@@ -1330,7 +1328,6 @@ static void record_context(struct drm_i915_error_context *e,
 		rcu_read_unlock();
 	}
 
-	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
 	e->sched_attr = ctx->sched;
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 99d6b7b270c20..302a14240b456 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -116,7 +116,6 @@ struct i915_gpu_state {
 		struct drm_i915_error_context {
 			char comm[TASK_COMM_LEN];
 			pid_t pid;
-			u32 handle;
 			u32 hw_id;
 			int active;
 			int guilty;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index ad3875b01722d..bc14e9aec2748 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -98,7 +98,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 		return ctx;
 
 	err = gem_context_register(ctx, file->driver_priv);
-	if (err)
+	if (err < 0)
 		goto err_ctx;
 
 	return ctx;

From 4daffb664a69532efdfee54f3eac5ce54e8c37dd Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 14:07:11 +0000
Subject: [PATCH 32/95] drm/i915: Stop storing the context name as the timeline
 name

The timeline->name is only used for convenience when pretty-printing via
i915_request.fence->ops->get_timeline_name(), and it is just as
convenient to pull it from the gem_context directly. The few instances
of its use inside GEM_TRACE() have proven more of a nuisance than
helpful, so, in my opinion, it is not worth keeping.
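
After the change the fence name is derived from the owning context, as
in the i915_request.c hunk below:

  	static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
  	{
  		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
  			return "signaled";

  		return to_request(fence)->gem_context->name ?: "[i915]";
  	}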

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321140711.11190-4-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c        | 5 ++---
 drivers/gpu/drm/i915/i915_request.c            | 7 ++-----
 drivers/gpu/drm/i915/i915_timeline.c           | 5 +----
 drivers/gpu/drm/i915/i915_timeline.h           | 2 --
 drivers/gpu/drm/i915/i915_timeline_types.h     | 1 -
 drivers/gpu/drm/i915/intel_engine_cs.c         | 3 +--
 drivers/gpu/drm/i915/intel_lrc.c               | 2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c        | 4 +---
 drivers/gpu/drm/i915/selftests/i915_timeline.c | 6 +++---
 drivers/gpu/drm/i915/selftests/mock_engine.c   | 9 ++-------
 10 files changed, 13 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9187910391d8f..00dec72f68758 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -673,9 +673,8 @@ last_request_on_engine(struct i915_timeline *timeline,
 	rq = i915_active_request_raw(&timeline->last_request,
 				     &engine->i915->drm.struct_mutex);
 	if (rq && rq->engine == engine) {
-		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
-			  timeline->name, engine->name,
-			  rq->fence.context, rq->fence.seqno);
+		GEM_TRACE("last request on engine %s: %llx:%llu\n",
+			  engine->name, rq->fence.context, rq->fence.seqno);
 		GEM_BUG_ON(rq->timeline != timeline);
 		return rq;
 	}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0a3d94517d0a3..1529824d7c613 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -66,7 +66,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->timeline->name;
+	return to_request(fence)->gem_context->name ?: "[i915]";
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -167,7 +167,6 @@ static void advance_ring(struct i915_request *request)
 		 * is just about to be. Either works, if we miss the last two
 		 * noops - they are safe to be replayed on a reset.
 		 */
-		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
 		tail = READ_ONCE(request->tail);
 		list_del(&ring->active_link);
 	} else {
@@ -1064,10 +1063,8 @@ void i915_request_add(struct i915_request *request)
 	__i915_active_request_set(&timeline->last_request, request);
 
 	list_add_tail(&request->ring_link, &ring->request_list);
-	if (list_is_first(&request->ring_link, &ring->request_list)) {
-		GEM_TRACE("marking %s as active\n", ring->timeline->name);
+	if (list_is_first(&request->ring_link, &ring->request_list))
 		list_add(&ring->active_link, &request->i915->gt.active_rings);
-	}
 	request->i915->gt.active_engines |= request->engine->mask;
 	request->emitted_jiffies = jiffies;
 
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 8484ba6e51d1c..2f49073649205 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -197,7 +197,6 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
 
 int i915_timeline_init(struct drm_i915_private *i915,
 		       struct i915_timeline *timeline,
-		       const char *name,
 		       struct i915_vma *hwsp)
 {
 	void *vaddr;
@@ -213,7 +212,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
 
 	timeline->i915 = i915;
-	timeline->name = name;
 	timeline->pin_count = 0;
 	timeline->has_initial_breadcrumb = !hwsp;
 	timeline->hwsp_cacheline = NULL;
@@ -342,7 +340,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915,
-		     const char *name,
 		     struct i915_vma *global_hwsp)
 {
 	struct i915_timeline *timeline;
@@ -352,7 +349,7 @@ i915_timeline_create(struct drm_i915_private *i915,
 	if (!timeline)
 		return ERR_PTR(-ENOMEM);
 
-	err = i915_timeline_init(i915, timeline, name, global_hwsp);
+	err = i915_timeline_init(i915, timeline, global_hwsp);
 	if (err) {
 		kfree(timeline);
 		return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 9126c82064903..c1e47a423d850 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -32,7 +32,6 @@
 
 int i915_timeline_init(struct drm_i915_private *i915,
 		       struct i915_timeline *tl,
-		       const char *name,
 		       struct i915_vma *hwsp);
 void i915_timeline_fini(struct i915_timeline *tl);
 
@@ -57,7 +56,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
 
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915,
-		     const char *name,
 		     struct i915_vma *global_hwsp);
 
 static inline struct i915_timeline *
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 8ff146dc05ba2..12ba3c573aa0b 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -71,7 +71,6 @@ struct i915_timeline {
 	struct i915_active_request barrier;
 
 	struct list_head link;
-	const char *name;
 	struct drm_i915_private *i915;
 
 	struct kref kref;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 588c640b5a572..24de34289d686 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -581,7 +581,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
 
 	err = i915_timeline_init(engine->i915,
 				 &engine->timeline,
-				 engine->name,
 				 engine->status_page.vma);
 	if (err)
 		goto err_hwsp;
@@ -660,7 +659,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 		return -ENOMEM;
 
 	if (i915_timeline_init(engine->i915,
-			       &frame->timeline, "measure",
+			       &frame->timeline,
 			       engine->status_page.vma))
 		goto out_frame;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 82ee3f6695643..131b89972a783 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2805,7 +2805,7 @@ populate_lr_context(struct intel_context *ce,
 
 static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
 {
-	return i915_timeline_create(ctx->i915, ctx->name, NULL);
+	return i915_timeline_create(ctx->i915, NULL);
 }
 
 static int execlists_context_deferred_alloc(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 720d39729ead8..03bbdf47e7e48 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1553,9 +1553,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (err)
 		return err;
 
-	timeline = i915_timeline_create(engine->i915,
-					engine->name,
-					engine->status_page.vma);
+	timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
 	if (IS_ERR(timeline)) {
 		err = PTR_ERR(timeline);
 		goto err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 844701759ffc1..8e7bcaa1eb663 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
 		unsigned long cacheline;
 		int err;
 
-		tl = i915_timeline_create(state->i915, "mock", NULL);
+		tl = i915_timeline_create(state->i915, NULL);
 		if (IS_ERR(tl))
 			return PTR_ERR(tl);
 
@@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
 {
 	struct i915_timeline *tl;
 
-	tl = i915_timeline_create(i915, "live", NULL);
+	tl = i915_timeline_create(i915, NULL);
 	if (IS_ERR(tl))
 		return tl;
 
@@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(i915);
 
-	tl = i915_timeline_create(i915, __func__, NULL);
+	tl = i915_timeline_create(i915, NULL);
 	if (IS_ERR(tl)) {
 		err = PTR_ERR(tl);
 		goto out_rpm;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 61744819172ba..61a8206ed6772 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -50,9 +50,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	if (!ring)
 		return NULL;
 
-	if (i915_timeline_init(engine->i915,
-			       &ring->timeline, engine->name,
-			       NULL)) {
+	if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
 		kfree(ring);
 		return NULL;
 	}
@@ -259,10 +257,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.reset.finish = mock_reset_finish;
 	engine->base.cancel_requests = mock_cancel_requests;
 
-	if (i915_timeline_init(i915,
-			       &engine->base.timeline,
-			       engine->base.name,
-			       NULL))
+	if (i915_timeline_init(i915, &engine->base.timeline, NULL))
 		goto err_free;
 	i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
 

From a679f58d051025db6fa86226c4d35650b75e990f Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 16:19:07 +0000
Subject: [PATCH 33/95] drm/i915: Flush pages on acquisition

When we return pages to the system, we ensure that they are marked as
being in the CPU domain since any external access is uncontrolled and we
must assume the worst. This means that we need to always flush the pages
on acquisition if we need to use them on the GPU, and from the beginning
have used set-domain. Set-domain is overkill for the purpose as it is a
general synchronisation barrier, but our intent is to only flush the
pages being swapped in. If we move that flush into the pages acquisition
phase, we know then that when we have obj->mm.pages, they are coherent
with the GPU and need only maintain that status without resorting to
heavy handed use of set-domain.

The principal knock-on effect for userspace is through mmap-gtt
pagefaulting. Our uAPI has always implied that the GTT mmap was async
(especially as when any pagefault occurs is unpredictable to userspace)
and so userspace had to apply explicit domain control itself
(set-domain). However, swapping is transparent to the kernel, and so on
first fault we need to acquire the pages and make them coherent for
access through the GTT. Our use of set-domain here leaks into the uABI
that the first pagefault was synchronous. This is unintentional and,
barring a few igt, should go unnoticed; nevertheless we bump the uABI
version for mmap-gtt to reflect the change in behaviour.
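
As a purely illustrative sketch, not part of the patch itself: userspace
that cares about the new asynchronous first-fault behaviour can detect it
by comparing the mmap-gtt version from GETPARAM against 3. The header
path and lack of error handling are the usual libdrm-style assumptions.

#include <stdbool.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool gtt_mmap_first_fault_is_async(int fd)
{
	int version = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &version,
	};

	/* If the kernel does not know the param, assume the old sync uABI. */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return version >= 3; /* 3 is the version bumped by this patch */
}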

Another implication of the change is that gem_create() is presumed to
create an object that is coherent with the CPU and is in the CPU write
domain, so a set-domain(CPU) following a gem_create() would be a minor
operation that merely checked whether we could allocate all pages for
the object. On applying this change, a set-domain(CPU) causes a clflush
as we acquire the pages. This will have a small impact on mesa as we move
the clflush on !llc from execbuf time to create time, but that should
have minimal performance impact: the same clflush still exists, only
performed earlier, and because of that clflush cost userspace already
recycles bo and so avoids allocating fresh objects where it can.

Internally, the presumption that objects are created in the CPU
write-domain and remain so through writes to obj->mm.mapping is more
prevalent than I expected; but easy enough to catch and apply a manual
flush.

For the future, we should push the page flush from the central
set_pages() into the callers so that we can more finely control when it
is applied, but for now doing it in one location is easier to validate, at
the cost of sometimes flushing when there is no need.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h               |  8 +++
 drivers/gpu/drm/i915/i915_gem.c               | 57 ++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_dmabuf.c        |  1 +
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  7 +--
 drivers/gpu/drm/i915/i915_gem_render_state.c  |  2 +-
 drivers/gpu/drm/i915/i915_perf.c              |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        |  4 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 63 +++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 62 +++++++-----------
 drivers/gpu/drm/i915/selftests/huge_pages.c   |  5 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c | 17 ++---
 .../gpu/drm/i915/selftests/i915_gem_dmabuf.c  |  1 +
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |  4 +-
 drivers/gpu/drm/i915/selftests/i915_request.c | 14 ++---
 drivers/gpu/drm/i915/selftests/igt_spinner.c  |  2 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  2 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  5 +-
 .../drm/i915/selftests/intel_workarounds.c    |  3 +
 18 files changed, 127 insertions(+), 134 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6d674aa2786a..fefcb39aefc42 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2965,6 +2965,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 					   enum i915_map_type type);
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+				 unsigned long offset,
+				 unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
 /**
  * i915_gem_object_unpin_map - releases an earlier mapping
  * @obj: the object to unmap
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1a684b7e8c094..72374e952e4bd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1713,6 +1713,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 2 - Recognise WC as a separate cache domain so that we can flush the
  *     delayed writes via GTT before performing direct access via WC.
  *
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ *     pagefault; swapin remains transparent.
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1740,7 +1743,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-	return 2;
+	return 3;
 }
 
 static inline struct i915_ggtt_view
@@ -1808,17 +1811,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
-	/* Try to flush the object off the GPU first without holding the lock.
-	 * Upon acquiring the lock, we will perform our sanity checks and then
-	 * repeat the flush holding the lock in the normal manner to catch cases
-	 * where we are gazumped.
-	 */
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		goto err;
-
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		goto err;
@@ -1874,10 +1866,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
-		goto err_unpin;
-
 	ret = i915_vma_pin_fence(vma);
 	if (ret)
 		goto err_unpin;
@@ -2534,6 +2522,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->mm.lock);
 
+	/* Make the pages coherent with the GPU (flushing any swapin). */
+	if (obj->cache_dirty) {
+		obj->write_domain = 0;
+		if (i915_gem_object_has_struct_page(obj))
+			drm_clflush_sg(pages);
+		obj->cache_dirty = false;
+	}
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 
@@ -2735,6 +2731,33 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	goto out_unlock;
 }
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+				 unsigned long offset,
+				 unsigned long size)
+{
+	enum i915_map_type has_type;
+	void *ptr;
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+				     offset, size, obj->base.size));
+
+	obj->mm.dirty = true;
+
+	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+		return;
+
+	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	if (has_type == I915_MAP_WC)
+		return;
+
+	drm_clflush_virt_range(ptr + offset, size);
+	if (size == obj->base.size) {
+		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+		obj->cache_dirty = false;
+	}
+}
+
 static int
 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 			   const struct drm_i915_gem_pwrite *arg)
@@ -4692,6 +4715,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 
 		engine->default_state = i915_gem_object_get(state->obj);
+		i915_gem_object_set_cache_coherency(engine->default_state,
+						    I915_CACHE_LLC);
 
 		/* Check we can acquire the image of the context state */
 		vaddr = i915_gem_object_pin_map(engine->default_state,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 33181678990ec..5a101a9462d85 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee6d301a96277..3d672c9edb941 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
 {
 	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
 	i915_gem_object_unpin_map(cache->rq->batch->obj);
+
 	i915_gem_chipset_flush(cache->rq->i915);
 
 	i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);
 
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	if (err)
-		goto err_unmap;
-
 	batch = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 91196348c68c2..9440024c763f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
 		drm_clflush_virt_range(d, i * sizeof(u32));
 	kunmap_atomic(d);
 
-	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+	ret = 0;
 out:
 	i915_gem_obj_finish_shmem_access(so->obj);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e0fcb982a14f3..85c5cb779297d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto unlock;
 	}
 
-	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
-	if (ret)
-		goto err_unref;
+	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
 
 	/* PreHSW required 512K alignment, HSW requires 16M */
 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 24de34289d686..c5b4173271325 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -528,9 +528,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 		return PTR_ERR(obj);
 	}
 
-	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-	if (ret)
-		goto err;
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
 	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 131b89972a783..35f7ef9e75c8d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1248,6 +1248,30 @@ static void execlists_context_destroy(struct kref *kref)
 	intel_context_free(ce);
 }
 
+static int __context_pin(struct i915_vma *vma)
+{
+	unsigned int flags;
+	int err;
+
+	flags = PIN_GLOBAL | PIN_HIGH;
+	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+	err = i915_vma_pin(vma, 0, 0, flags);
+	if (err)
+		return err;
+
+	vma->obj->pin_global++;
+	vma->obj->mm.dirty = true;
+
+	return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+	vma->obj->pin_global--;
+	__i915_vma_unpin(vma);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine;
@@ -1276,31 +1300,8 @@ static void execlists_context_unpin(struct intel_context *ce)
 
 	intel_ring_unpin(ce->ring);
 
-	ce->state->obj->pin_global--;
 	i915_gem_object_unpin_map(ce->state->obj);
-	i915_vma_unpin(ce->state);
-}
-
-static int __context_pin(struct i915_vma *vma)
-{
-	unsigned int flags;
-	int err;
-
-	/*
-	 * Clear this page out of any CPU caches for coherent swap-in/out.
-	 * We only want to do this on the first bind so that we do not stall
-	 * on an active context (which by nature is already on the GPU).
-	 */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		err = i915_gem_object_set_to_wc_domain(vma->obj, true);
-		if (err)
-			return err;
-	}
-
-	flags = PIN_GLOBAL | PIN_HIGH;
-	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
-	return i915_vma_pin(vma, 0, 0, flags);
+	__context_unpin(ce->state);
 }
 
 static void
@@ -1361,7 +1362,6 @@ __execlists_context_pin(struct intel_context *ce,
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
 
-	ce->state->obj->pin_global++;
 	return 0;
 
 unpin_ring:
@@ -1369,7 +1369,7 @@ __execlists_context_pin(struct intel_context *ce,
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
 unpin_vma:
-	__i915_vma_unpin(ce->state);
+	__context_unpin(ce->state);
 err:
 	return ret;
 }
@@ -2751,19 +2751,12 @@ populate_lr_context(struct intel_context *ce,
 	u32 *regs;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-		return ret;
-	}
-
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->mm.dirty = true;
 
 	if (engine->default_state) {
 		/*
@@ -2798,7 +2791,11 @@ populate_lr_context(struct intel_context *ce,
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
 
+	ret = 0;
 err_unpin_ctx:
+	__i915_gem_object_flush_map(ctx_obj,
+				    LRC_HEADER_PAGES * PAGE_SIZE,
+				    engine->context_size);
 	i915_gem_object_unpin_map(ctx_obj);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03bbdf47e7e48..359d6af1a0b95 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1195,15 +1195,6 @@ int intel_ring_pin(struct intel_ring *ring)
 	else
 		flags |= PIN_HIGH;
 
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
-			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-		else
-			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
-		if (unlikely(ret))
-			goto unpin_timeline;
-	}
-
 	ret = i915_vma_pin(vma, 0, 0, flags);
 	if (unlikely(ret))
 		goto unpin_timeline;
@@ -1392,17 +1383,6 @@ static int __context_pin(struct intel_context *ce)
 	if (!vma)
 		return 0;
 
-	/*
-	 * Clear this page out of any CPU caches for coherent swap-in/out.
-	 * We only want to do this on the first bind so that we do not stall
-	 * on an active context (which by nature is already on the GPU).
-	 */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-		if (err)
-			return err;
-	}
-
 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
 		return err;
@@ -1412,6 +1392,7 @@ static int __context_pin(struct intel_context *ce)
 	 * it cannot reclaim the object until we release it.
 	 */
 	vma->obj->pin_global++;
+	vma->obj->mm.dirty = true;
 
 	return 0;
 }
@@ -1446,6 +1427,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 *
+	 * Snooping is required on non-llc platforms in execlist
+	 * mode, but since all GGTT accesses use PAT entry 0 we
+	 * get snooping anyway regardless of cache_level.
+	 *
+	 * This is only applicable for Ivy Bridge devices since
+	 * later platforms don't have L3 control bits in the PTE.
+	 */
+	if (IS_IVYBRIDGE(i915))
+		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
 	if (engine->default_state) {
 		void *defaults, *vaddr;
 
@@ -1463,29 +1462,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
 		}
 
 		memcpy(vaddr, defaults, engine->context_size);
-
 		i915_gem_object_unpin_map(engine->default_state);
-		i915_gem_object_unpin_map(obj);
-	}
 
-	/*
-	 * Try to make the context utilize L3 as well as LLC.
-	 *
-	 * On VLV we don't have L3 controls in the PTEs so we
-	 * shouldn't touch the cache level, especially as that
-	 * would make the object snooped which might have a
-	 * negative performance impact.
-	 *
-	 * Snooping is required on non-llc platforms in execlist
-	 * mode, but since all GGTT accesses use PAT entry 0 we
-	 * get snooping anyway regardless of cache_level.
-	 *
-	 * This is only applicable for Ivy Bridge devices since
-	 * later platforms don't have L3 control bits in the PTE.
-	 */
-	if (IS_IVYBRIDGE(i915)) {
-		/* Ignore any error, regard it as a simple optimisation */
-		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		i915_gem_object_flush_map(obj);
+		i915_gem_object_unpin_map(obj);
 	}
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 1f419fb094f40..c5c8ba6c059f1 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		goto err;
-
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
 	}
 	*vaddr = 0xdeadbeaf;
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, vm, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ab2aa32fd0196..fb6dc54ce7ff3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 		offset += PAGE_SIZE;
 	}
 	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -604,12 +605,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
 	*cmd++ = upper_32_bits(vma->node.start);
 	*cmd = MI_BATCH_BUFFER_END;
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
-
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1202,12 +1200,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	}
 	*cmd++ = value;
 	*cmd = MI_BATCH_BUFFER_END;
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
-
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1299,11 +1294,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		*cmd++ = result;
 	}
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
 	if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53ce..2b943ee246c9a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
 		goto err;
 	}
 	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b270eab1cad16..9a9451846b33f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 	quirk_add(obj, &objects);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 	quirk_add(obj, &objects);
 
 	/* Neighbouring; same colour - should fit */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 3eb6a6b075abb..e6ffe2240126a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -619,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	}
 
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(i915);
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
+	i915_gem_chipset_flush(i915);
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
@@ -777,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		goto err;
-
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
@@ -799,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 		*cmd++ = lower_32_bits(vma->node.start);
 	}
 	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
-	i915_gem_chipset_flush(i915);
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
+	i915_gem_chipset_flush(i915);
+
 	return vma;
 
 err:
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index d0b93a3fbc545..16890dfe74c00 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
 	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index b5e35b2a925f2..76b4fa150f2e4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -70,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
 	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d61520ea03c13..9e871eb0bfb18 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -1018,12 +1018,9 @@ static int live_preempt_smoke(void *arg)
 	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
 		cs[n] = MI_ARB_CHECK;
 	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(smoke.batch);
 	i915_gem_object_unpin_map(smoke.batch);
 
-	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
-	if (err)
-		goto err_batch;
-
 	for (n = 0; n < smoke.ncontext; n++) {
 		smoke.contexts[n] = kernel_context(smoke.i915);
 		if (!smoke.contexts[n])
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index f2a2b51a4662b..3baed59008d72 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -90,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_obj;
 	}
 	memset(cs, 0xc5, PAGE_SIZE);
+	i915_gem_object_flush_map(result);
 	i915_gem_object_unpin_map(result);
 
 	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -358,6 +359,7 @@ static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
 		goto err_obj;
 	}
 	memset(ptr, 0xc5, PAGE_SIZE);
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
@@ -551,6 +553,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 
 		*cs++ = MI_BATCH_BUFFER_END;
 
+		i915_gem_object_flush_map(batch->obj);
 		i915_gem_object_unpin_map(batch->obj);
 		i915_gem_chipset_flush(ctx->i915);
 

From 754a25442705c4f90e0d05f1a7bd303ffe700ca9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 16:19:08 +0000
Subject: [PATCH 34/95] drm/i915: Skip object locking around a no-op set-domain
 ioctl

If we are already in the desired write domain of a set-domain ioctl,
then there is nothing for us to do and we can quickly return back to
userspace, avoiding any lock contention. By recognising that the
write_domain is always a subset of the read_domains, and excluding the
no-op case of requiring 0 read_domains in the ioctl, we can infer that if
the current write_domain already matches the target read_domains, there
is nothing for us to do.
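
For illustration only (the struct, ioctl and domain flags are from the
existing uapi; the helper itself is hypothetical), the short-circuit now
covers the common pattern of re-asserting a domain the object is already
in:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_domain_gtt(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.read_domains = I915_GEM_DOMAIN_GTT;
	arg.write_domain = I915_GEM_DOMAIN_GTT;

	/*
	 * If obj->write_domain is already GTT, the ioctl now returns
	 * without taking any locks or touching the backing store.
	 */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
}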

A secondary aspect of this is that we undo the arbitrary fetching and
potential flushing of all pages for a set-domain(.write=CPU) call on a
fresh object -- which was introduced simply because we do the get-pages
before taking the struct_mutex.

References: 40e62d5d6be8 ("drm/i915: Acquire the backing storage outside of struct_mutex in set-domain")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 72374e952e4bd..638ae277ff039 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1484,17 +1484,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
 		return -EINVAL;
 
-	/* Having something in the write domain implies it's in the read
+	/*
+	 * Having something in the write domain implies it's in the read
 	 * domain, and only that read domain.  Enforce that in the request.
 	 */
-	if (write_domain != 0 && read_domains != write_domain)
+	if (write_domain && read_domains != write_domain)
 		return -EINVAL;
 
+	if (!read_domains)
+		return 0;
+
 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
-	/* Try to flush the object off the GPU without holding the lock.
+	/*
+	 * Already in the desired write domain? Nothing for us to do!
+	 *
+	 * We apply a little bit of cunning here to catch a broader set of
+	 * no-ops. If obj->write_domain is set, we must be in the same
+	 * obj->read_domains, and only that domain. Therefore, if that
+	 * obj->write_domain matches the request read_domains, we are
+	 * already in the same read/write domain and can skip the operation,
+	 * without having to further check the requested write_domain.
+	 */
+	if (READ_ONCE(obj->write_domain) == read_domains) {
+		err = 0;
+		goto out;
+	}
+
+	/*
+	 * Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */

From d067994cc425209032b5713cb3e7fac6d1f38460 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 19:40:30 +0000
Subject: [PATCH 35/95] drm/i915/selftests: Calculate maximum ring size for
 preemption chain

32 is too many for the likes of kbl, and inserting that many requests
into the ring requires us to declare the first few hung --
understandably a slow and unexpected process. Instead, measure the size
of a single request and use that to estimate the upper bound on the
chain length we can use for our test, remembering to flush the previous
chain between tests for safety.
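
As a worked example with made-up numbers: if the engine's ring is 16384
bytes and the measured spinner request spans 256 bytes (wa_tail - head),
the estimated upper bound becomes 16384 / 256 = 64 requests, and the
prime-number loop then only walks chain lengths up to that bound instead
of the previous fixed 32.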

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: "Yokoyama, Caz" <caz.yokoyama@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321194031.20240-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 40 ++++++++++++++++++++--
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 9e871eb0bfb18..0afebbe2b9d74 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -615,14 +615,33 @@ static int live_chain_preempt(void *arg)
 		struct i915_sched_attr attr = {
 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
 		};
-		int count, i;
+		struct i915_request *rq;
+		int ring_size, count, i;
 
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
-			struct i915_request *rq;
+		rq = igt_spinner_create_request(&lo.spin,
+						lo.ctx, engine,
+						MI_ARB_CHECK);
+		if (IS_ERR(rq))
+			goto err_wedged;
+		i915_request_add(rq);
+
+		ring_size = rq->wa_tail - rq->head;
+		if (ring_size < 0)
+			ring_size += rq->ring->size;
+		ring_size = rq->ring->size / ring_size;
+		pr_debug("%s(%s): Using maximum of %d requests\n",
+			 __func__, engine->name, ring_size);
 
+		igt_spinner_end(&lo.spin);
+		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+			pr_err("Timed out waiting to flush %s\n", engine->name);
+			goto err_wedged;
+		}
+
+		for_each_prime_number_from(count, 1, ring_size) {
 			rq = igt_spinner_create_request(&hi.spin,
 							hi.ctx, engine,
 							MI_ARB_CHECK);
@@ -664,6 +683,21 @@ static int live_chain_preempt(void *arg)
 				goto err_wedged;
 			}
 			igt_spinner_end(&lo.spin);
+
+			rq = i915_request_alloc(engine, lo.ctx);
+			if (IS_ERR(rq))
+				goto err_wedged;
+			i915_request_add(rq);
+			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(i915->drm.dev);
+
+				pr_err("Failed to flush low priority chain of %d requests\n",
+				       count);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+				goto err_wedged;
+			}
 		}
 	}
 

From e70d3d8040410a46893c0986a5b0fd64c466b7e9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 21 Mar 2019 19:40:31 +0000
Subject: [PATCH 36/95] drm/i915/selftests: Mark up preemption tests for hang
 detection

Use the igt_live_test framework for detecting whether an unwanted hang
occurred during test execution, and report a failure if it did.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321194031.20240-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 38 ++++++++++++++++++++--
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0afebbe2b9d74..0d3cae564db87 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -10,6 +10,7 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_live_test.h"
 #include "igt_spinner.h"
 #include "i915_random.h"
 
@@ -113,11 +114,17 @@ static int live_preempt(void *arg)
 		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
 	for_each_engine(engine, i915, id) {
+		struct igt_live_test t;
 		struct i915_request *rq;
 
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
+		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ -153,7 +160,8 @@ static int live_preempt(void *arg)
 
 		igt_spinner_end(&spin_hi);
 		igt_spinner_end(&spin_lo);
-		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+		if (igt_live_test_end(&t)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -207,11 +215,17 @@ static int live_late_preempt(void *arg)
 		goto err_ctx_hi;
 
 	for_each_engine(engine, i915, id) {
+		struct igt_live_test t;
 		struct i915_request *rq;
 
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
+		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ -250,7 +264,8 @@ static int live_late_preempt(void *arg)
 
 		igt_spinner_end(&spin_hi);
 		igt_spinner_end(&spin_lo);
-		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+		if (igt_live_test_end(&t)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -615,6 +630,7 @@ static int live_chain_preempt(void *arg)
 		struct i915_sched_attr attr = {
 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
 		};
+		struct igt_live_test t;
 		struct i915_request *rq;
 		int ring_size, count, i;
 
@@ -641,6 +657,11 @@ static int live_chain_preempt(void *arg)
 			goto err_wedged;
 		}
 
+		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+			err = -EIO;
+			goto err_wedged;
+		}
+
 		for_each_prime_number_from(count, 1, ring_size) {
 			rq = igt_spinner_create_request(&hi.spin,
 							hi.ctx, engine,
@@ -699,6 +720,11 @@ static int live_chain_preempt(void *arg)
 				goto err_wedged;
 			}
 		}
+
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			goto err_wedged;
+		}
 	}
 
 	err = 0;
@@ -1022,6 +1048,7 @@ static int live_preempt_smoke(void *arg)
 	};
 	const unsigned int phase[] = { 0, BATCH };
 	intel_wakeref_t wakeref;
+	struct igt_live_test t;
 	int err = -ENOMEM;
 	u32 *cs;
 	int n;
@@ -1055,6 +1082,11 @@ static int live_preempt_smoke(void *arg)
 	i915_gem_object_flush_map(smoke.batch);
 	i915_gem_object_unpin_map(smoke.batch);
 
+	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+		err = -EIO;
+		goto err_batch;
+	}
+
 	for (n = 0; n < smoke.ncontext; n++) {
 		smoke.contexts[n] = kernel_context(smoke.i915);
 		if (!smoke.contexts[n])
@@ -1072,7 +1104,7 @@ static int live_preempt_smoke(void *arg)
 	}
 
 err_ctx:
-	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+	if (igt_live_test_end(&t))
 		err = -EIO;
 
 	for (n = 0; n < smoke.ncontext; n++) {

From b9d52d381e142fb3275430ac40ab119565d046f4 Mon Sep 17 00:00:00 2001
From: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Date: Thu, 21 Mar 2019 13:38:04 -0700
Subject: [PATCH 37/95] drm/i915/guc: GuC suspend path cleanup

Adding a call to intel_uc_suspend in i915_gem_suspend, which
is a common point for the suspend/resume and hibernate paths.
This fixes an unbalanced call that causes issues with the CTB
register/deregister.

v2: Making the call unconditional (Daniele)
	Moving the call to after the GEM_BUG_ON (Chris)

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321203804.6845-1-sujaritha.sundaresan@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 638ae277ff039..4685ed04b3545 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4431,6 +4431,8 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 */
 	GEM_BUG_ON(i915->gt.awake);
 
+	intel_uc_suspend(i915);
+
 	intel_runtime_pm_put(i915, wakeref);
 }
 

From 9d1305ef80b95dde0337106ed8b826604e2155ad Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 22 Mar 2019 09:23:22 +0000
Subject: [PATCH 38/95] drm/i915: Introduce the i915_user_extension_method

An idea for extending uABI inspired by Vulkan's extension chains.
Instead of expanding the data struct for each ioctl every time we need
to add a new feature, define an extension chain instead. As we add
optional interfaces to control the ioctl, we define a new extension
struct that can be linked into the ioctl data only when required by the
user. The key advantage is being able to ignore large control structs for
optional interfaces/extensions, while being able to process them in a
consistent manner.

In comparison to other extensible ioctls, the key difference is the
use of a linked chain of extension structs vs an array of tagged
pointers. For example,

struct drm_amdgpu_cs_chunk {
        __u32           chunk_id;
        __u32           length_dw;
        __u64           chunk_data;
};

struct drm_amdgpu_cs_in {
        __u32           ctx_id;
        __u32           bo_list_handle;
        __u32           num_chunks;
        __u32           _pad;
        __u64           chunks;
};

allows userspace to pass in an array of pointers to extension structs, but
must therefore keep constructing that array alongside the command stream.
In dynamic situations like that, a linked list is preferred and does not
suffer from extra cache line misses, as the extension structs themselves
must still be loaded separately from the chunks array.
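
To make the chaining concrete, a hypothetical userspace sketch (the
extension structs and their names are invented for illustration; only
struct i915_user_extension comes from this patch, and <drm/i915_drm.h>
plus <stdint.h> are assumed for the types and uintptr_t):

struct fake_ext_bar {
	struct i915_user_extension base; /* conventionally the first member */
	__u64 value;
};

struct fake_ext_bar bar = {
	.base = { .name = FAKE_EXT_BAR }, /* end of chain: next_extension = 0 */
	.value = 42,
};

struct fake_ext_foo {
	struct i915_user_extension base;
	__u64 flags;
} foo = {
	.base = {
		.next_extension = (uintptr_t)&bar, /* link to the next extension */
		.name = FAKE_EXT_FOO,
	},
};

The ioctl argument then only needs to carry a single u64, e.g.
args.extensions = (uintptr_t)&foo, and the kernel walks the chain in
i915_user_extensions(), dispatching each struct by its .name.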

v2: Apply the tail call optimisation directly to nip the worry of stack
overflow in the bud.
v3: Defend against recursion.
v4: Fixup local types to match new uabi

Opens:
- do we include the result as an out-field in each chain?
struct i915_user_extension {
	__u64 next_extension;
	__u64 name;
	__s32 result;
	__u32 mbz; /* reserved for future use */
};
* Undecided, so provision some room for future expansion.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/Makefile               |  1 +
 drivers/gpu/drm/i915/i915_user_extensions.c | 61 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_user_extensions.h | 20 +++++++
 drivers/gpu/drm/i915/i915_utils.h           | 31 +++++++++++
 include/uapi/drm/i915_drm.h                 | 22 ++++++++
 5 files changed, 135 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/i915_user_extensions.c
 create mode 100644 drivers/gpu/drm/i915/i915_user_extensions.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 68fecf3554714..60de05f3fa608 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,6 +46,7 @@ i915-y := i915_drv.o \
 	  i915_sw_fence.o \
 	  i915_syncmap.o \
 	  i915_sysfs.o \
+	  i915_user_extensions.o \
 	  intel_csr.o \
 	  intel_device_info.o \
 	  intel_pm.o \
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c
new file mode 100644
index 0000000000000..c822d0aafd2dc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/nospec.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_user_extensions.h"
+#include "i915_utils.h"
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+			 const i915_user_extension_fn *tbl,
+			 unsigned int count,
+			 void *data)
+{
+	unsigned int stackdepth = 512;
+
+	while (ext) {
+		int i, err;
+		u32 name;
+		u64 next;
+
+		if (!stackdepth--) /* recursion vs useful flexibility */
+			return -E2BIG;
+
+		err = check_user_mbz(&ext->flags);
+		if (err)
+			return err;
+
+		for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
+			err = check_user_mbz(&ext->rsvd[i]);
+			if (err)
+				return err;
+		}
+
+		if (get_user(name, &ext->name))
+			return -EFAULT;
+
+		err = -EINVAL;
+		if (name < count) {
+			name = array_index_nospec(name, count);
+			if (tbl[name])
+				err = tbl[name](ext, data);
+		}
+		if (err)
+			return err;
+
+		if (get_user(next, &ext->next_extension) ||
+		    overflows_type(next, ext))
+			return -EFAULT;
+
+		ext = u64_to_user_ptr(next);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h
new file mode 100644
index 0000000000000..a14bf6bba9a1f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef I915_USER_EXTENSIONS_H
+#define I915_USER_EXTENSIONS_H
+
+struct i915_user_extension;
+
+typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
+				      void *data);
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+			 const i915_user_extension_fn *tbl,
+			 unsigned int count,
+			 void *data);
+
+#endif /* I915_USER_EXTENSIONS_H */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 9726df37c4c41..8baedcaddbba7 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -105,6 +105,37 @@
 	__T;								\
 })
 
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({				\
+	void __user *__mptr = (void __user *)(ptr);			\
+	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\
+			 !__same_type(*(ptr), void),			\
+			 "pointer type mismatch in container_of()");	\
+	((type __user *)(__mptr - offsetof(type, member))); })
+
+/*
+ * check_user_mbz: Check that a user value exists and is zero
+ *
+ * Frequently in our uABI we reserve space for future extensions, and
+ * two ensure that userspace is prepared we enforce that space must
+ * be zero. (Then any future extension can safely assume a default value
+ * of 0.)
+ *
+ * check_user_mbz() combines checking that the user pointer is accessible
+ * and that the contained value is zero.
+ *
+ * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
+ */
+#define check_user_mbz(U) ({						\
+	typeof(*(U)) mbz__;						\
+	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0;		\
+})
+
 static inline u64 ptr_to_u64(const void *ptr)
 {
 	return (uintptr_t)ptr;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index aa2d4c73a97dd..1c69ed16a923b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -62,6 +62,28 @@ extern "C" {
 #define I915_ERROR_UEVENT		"ERROR"
 #define I915_RESET_UEVENT		"RESET"
 
+/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+	__u64 next_extension;
+	__u32 name;
+	__u32 flags; /* All undefined bits must be zero. */
+	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
 /*
  * MOCS indexes used for GPU surfaces, defining the cacheability of the
  * surface data and the coherency for this data wrt. CPU vs. GPU accesses.

From e0695db7298ec2181d38bd3bfc5d0ff7a46f96f3 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 22 Mar 2019 09:23:23 +0000
Subject: [PATCH 39/95] drm/i915: Create/destroy VM (ppGTT) for use with
 contexts

In preparation for making the ppGTT binding for a context explicit (to
facilitate reusing the same ppGTT between different contexts), allow the
user to create and destroy named ppGTT.
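
As a rough userspace sketch of the new interface (the ioctls, struct
fields and context param are the ones added by this series; fd and
ctx_id are assumed to exist and error handling is omitted):

struct drm_i915_gem_vm_control vm = { 0 };
struct drm_i915_gem_context_param arg = { 0 };

/* Create a fresh ppGTT and receive a per-file handle in vm.vm_id. */
ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);

/* Point an existing context at that VM; contexts given the same
 * vm_id now share one ppGTT. */
arg.ctx_id = ctx_id;
arg.param = I915_CONTEXT_PARAM_VM;
arg.value = vm.vm_id;
ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);

/* Destroying the handle only drops the file's reference; contexts
 * still using the ppGTT keep it alive. */
ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);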

v2: Replace global barrier for swapping over the ppgtt and tlbs with a
local context barrier (Tvrtko)
v3: serialise with struct_mutex; it's lazy but required dammit
v4: Rewrite igt_ctx_shared_exec to be more different (aimed to be more
similar, turned out different!)

v5: Fix up test unwind for aliasing-ppgtt (snb)
v6: Tighten language for uapi struct drm_i915_gem_vm_control.
v7: Patch the context image for runtime ppgtt switching!

Testcase: igt/gem_vm_create
Testcase: igt/gem_ctx_param/vm
Testcase: igt/gem_ctx_clone/vm
Testcase: igt/gem_ctx_shared
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c               |   2 +
 drivers/gpu/drm/i915/i915_drv.h               |   3 +
 drivers/gpu/drm/i915/i915_gem_context.c       | 331 +++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem_context.h       |   5 +
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  19 +-
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  10 +-
 drivers/gpu/drm/i915/selftests/huge_pages.c   |   1 -
 .../gpu/drm/i915/selftests/i915_gem_context.c | 238 ++++++++++---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   1 -
 drivers/gpu/drm/i915/selftests/mock_context.c |   8 +-
 include/uapi/drm/i915_drm.h                   |  43 +++
 11 files changed, 580 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d01689737f1e7..f6403ba1d56a5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -3122,6 +3122,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fefcb39aefc42..f723b15527f80 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -220,6 +220,9 @@ struct drm_i915_file_private {
 	struct idr context_idr;
 	struct mutex context_idr_lock; /* guards context_idr */
 
+	struct idr vm_idr;
+	struct mutex vm_idr_lock; /* guards vm_idr */
+
 	unsigned int bsd_engine;
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 00dec72f68758..d0a56c8d0bb9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,7 @@
 #include "i915_drv.h"
 #include "i915_globals.h"
 #include "i915_trace.h"
+#include "i915_user_extensions.h"
 #include "intel_lrc_reg.h"
 #include "intel_workarounds.h"
 
@@ -120,12 +121,15 @@ static void lut_close(struct i915_gem_context *ctx)
 		list_del(&lut->obj_link);
 		i915_lut_handle_free(lut);
 	}
+	INIT_LIST_HEAD(&ctx->handles_list);
 
 	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
 
 		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+
+		vma->open_count--;
 		__i915_gem_object_release_unless_active(vma->obj);
 	}
 	rcu_read_unlock();
@@ -305,8 +309,6 @@ static void context_close(struct i915_gem_context *ctx)
 	 * the ppgtt).
 	 */
 	lut_close(ctx);
-	if (ctx->ppgtt)
-		i915_ppgtt_close(&ctx->ppgtt->vm);
 
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_put(ctx);
@@ -378,6 +380,28 @@ __create_context(struct drm_i915_private *dev_priv)
 	return ctx;
 }
 
+static struct i915_hw_ppgtt *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+{
+	struct i915_hw_ppgtt *old = ctx->ppgtt;
+
+	ctx->ppgtt = i915_ppgtt_get(ppgtt);
+	ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+
+	return old;
+}
+
+static void __assign_ppgtt(struct i915_gem_context *ctx,
+			   struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt == ctx->ppgtt)
+		return;
+
+	ppgtt = __set_ppgtt(ctx, ppgtt);
+	if (ppgtt)
+		i915_ppgtt_put(ppgtt);
+}
+
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *dev_priv)
 {
@@ -403,8 +427,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv)
 			return ERR_CAST(ppgtt);
 		}
 
-		ctx->ppgtt = ppgtt;
-		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
+		__assign_ppgtt(ctx, ppgtt);
+		i915_ppgtt_put(ppgtt);
 	}
 
 	trace_i915_context_create(ctx);
@@ -583,6 +607,12 @@ static int context_idr_cleanup(int id, void *p, void *data)
 	return 0;
 }
 
+static int vm_idr_cleanup(int id, void *p, void *data)
+{
+	i915_ppgtt_put(p);
+	return 0;
+}
+
 static int gem_context_register(struct i915_gem_context *ctx,
 				struct drm_i915_file_private *fpriv)
 {
@@ -621,8 +651,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	struct i915_gem_context *ctx;
 	int err;
 
-	idr_init(&file_priv->context_idr);
 	mutex_init(&file_priv->context_idr_lock);
+	mutex_init(&file_priv->vm_idr_lock);
+
+	idr_init(&file_priv->context_idr);
+	idr_init_base(&file_priv->vm_idr, 1);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	ctx = i915_gem_create_context(i915);
@@ -646,8 +679,10 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	context_close(ctx);
 	mutex_unlock(&i915->drm.struct_mutex);
 err:
-	mutex_destroy(&file_priv->context_idr_lock);
+	idr_destroy(&file_priv->vm_idr);
 	idr_destroy(&file_priv->context_idr);
+	mutex_destroy(&file_priv->vm_idr_lock);
+	mutex_destroy(&file_priv->context_idr_lock);
 	return PTR_ERR(ctx);
 }
 
@@ -660,6 +695,99 @@ void i915_gem_context_close(struct drm_file *file)
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
 	mutex_destroy(&file_priv->context_idr_lock);
+
+	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
+	idr_destroy(&file_priv->vm_idr);
+	mutex_destroy(&file_priv->vm_idr_lock);
+}
+
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_vm_control *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_hw_ppgtt *ppgtt;
+	int err;
+
+	if (!HAS_FULL_PPGTT(i915))
+		return -ENODEV;
+
+	if (args->flags)
+		return -EINVAL;
+
+	ppgtt = i915_ppgtt_create(i915);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
+
+	ppgtt->vm.file = file_priv;
+
+	if (args->extensions) {
+		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+					   NULL, 0,
+					   ppgtt);
+		if (err)
+			goto err_put;
+	}
+
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		goto err_put;
+
+	err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+	if (err < 0)
+		goto err_unlock;
+
+	GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
+	ppgtt->user_handle = err;
+
+	mutex_unlock(&file_priv->vm_idr_lock);
+
+	args->vm_id = err;
+	return 0;
+
+err_unlock:
+	mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+	i915_ppgtt_put(ppgtt);
+	return err;
+}
+
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_vm_control *args = data;
+	struct i915_hw_ppgtt *ppgtt;
+	int err;
+	u32 id;
+
+	if (args->flags)
+		return -EINVAL;
+
+	if (args->extensions)
+		return -EINVAL;
+
+	id = args->vm_id;
+	if (!id)
+		return -ENOENT;
+
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		return err;
+
+	ppgtt = idr_remove(&file_priv->vm_idr, id);
+	if (ppgtt) {
+		GEM_BUG_ON(ppgtt->user_handle != id);
+		ppgtt->user_handle = 0;
+	}
+
+	mutex_unlock(&file_priv->vm_idr_lock);
+	if (!ppgtt)
+		return -ENOENT;
+
+	i915_ppgtt_put(ppgtt);
+	return 0;
 }
 
 static struct i915_request *
@@ -702,12 +830,13 @@ static void cb_retire(struct i915_active *base)
 I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				unsigned long engines,
+				int (*emit)(struct i915_request *rq, void *data),
 				void (*task)(void *data),
 				void *data)
 {
 	struct drm_i915_private *i915 = ctx->i915;
 	struct context_barrier_task *cb;
-	struct intel_context *ce;
+	struct intel_context *ce, *next;
 	intel_wakeref_t wakeref;
 	int err = 0;
 
@@ -722,11 +851,11 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 	i915_active_acquire(&cb->base);
 
 	wakeref = intel_runtime_pm_get(i915);
-	list_for_each_entry(ce, &ctx->active_engines, active_link) {
+	rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
 		struct intel_engine_cs *engine = ce->engine;
 		struct i915_request *rq;
 
-		if (!(ce->engine->mask & engines))
+		if (!(engine->mask & engines))
 			continue;
 
 		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -741,7 +870,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 			break;
 		}
 
-		err = i915_active_ref(&cb->base, rq->fence.context, rq);
+		err = 0;
+		if (emit)
+			err = emit(rq, data);
+		if (err == 0)
+			err = i915_active_ref(&cb->base, rq->fence.context, rq);
+
 		i915_request_add(rq);
 		if (err)
 			break;
@@ -804,6 +938,170 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 	return 0;
 }
 
+static int get_ppgtt(struct i915_gem_context *ctx,
+		     struct drm_i915_gem_context_param *args)
+{
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
+	struct i915_hw_ppgtt *ppgtt;
+	int ret;
+
+	if (!ctx->ppgtt)
+		return -ENODEV;
+
+	/* XXX rcu acquire? */
+	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	if (ret)
+		return ret;
+
+	ppgtt = i915_ppgtt_get(ctx->ppgtt);
+	mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (ret)
+		goto err_put;
+
+	if (!ppgtt->user_handle) {
+		ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+		GEM_BUG_ON(!ret);
+		if (ret < 0)
+			goto err_unlock;
+
+		ppgtt->user_handle = ret;
+		i915_ppgtt_get(ppgtt);
+	}
+
+	args->size = 0;
+	args->value = ppgtt->user_handle;
+
+	ret = 0;
+err_unlock:
+	mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+	i915_ppgtt_put(ppgtt);
+	return ret;
+}
+
+static void set_ppgtt_barrier(void *data)
+{
+	struct i915_hw_ppgtt *old = data;
+
+	if (INTEL_GEN(old->vm.i915) < 8)
+		gen6_ppgtt_unpin_all(old);
+
+	i915_ppgtt_put(old);
+}
+
+static int emit_ppgtt_update(struct i915_request *rq, void *data)
+{
+	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+	struct intel_engine_cs *engine = rq->engine;
+	u32 *cs;
+	int i;
+
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
+
+		cs = intel_ring_begin(rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_LOAD_REGISTER_IMM(2);
+
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
+		*cs++ = lower_32_bits(pd_daddr);
+
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+		for (i = GEN8_3LVL_PDPES; i--; ) {
+			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+			*cs++ = upper_32_bits(pd_daddr);
+			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+			*cs++ = lower_32_bits(pd_daddr);
+		}
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+	} else {
+		/* ppGTT is not part of the legacy context image */
+		gen6_ppgtt_pin(ppgtt);
+	}
+
+	return 0;
+}
+
+static int set_ppgtt(struct i915_gem_context *ctx,
+		     struct drm_i915_gem_context_param *args)
+{
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
+	struct i915_hw_ppgtt *ppgtt, *old;
+	int err;
+
+	if (args->size)
+		return -EINVAL;
+
+	if (!ctx->ppgtt)
+		return -ENODEV;
+
+	if (upper_32_bits(args->value))
+		return -ENOENT;
+
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		return err;
+
+	ppgtt = idr_find(&file_priv->vm_idr, args->value);
+	if (ppgtt) {
+		GEM_BUG_ON(ppgtt->user_handle != args->value);
+		i915_ppgtt_get(ppgtt);
+	}
+	mutex_unlock(&file_priv->vm_idr_lock);
+	if (!ppgtt)
+		return -ENOENT;
+
+	err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	if (err)
+		goto out;
+
+	if (ppgtt == ctx->ppgtt)
+		goto unlock;
+
+	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+	lut_close(ctx);
+
+	old = __set_ppgtt(ctx, ppgtt);
+
+	/*
+	 * We need to flush any requests using the current ppgtt before
+	 * we release it as the requests do not hold a reference themselves,
+	 * only indirectly through the context.
+	 */
+	err = context_barrier_task(ctx, ALL_ENGINES,
+				   emit_ppgtt_update,
+				   set_ppgtt_barrier,
+				   old);
+	if (err) {
+		ctx->ppgtt = old;
+		ctx->desc_template = default_desc_template(ctx->i915, old);
+		i915_ppgtt_put(ppgtt);
+	}
+
+unlock:
+	mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+out:
+	i915_ppgtt_put(ppgtt);
+	return err;
+}
+
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
@@ -984,6 +1282,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_SSEU:
 		ret = get_sseu(ctx, args);
 		break;
+	case I915_CONTEXT_PARAM_VM:
+		ret = get_ppgtt(ctx, args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -1285,9 +1586,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	switch (args->param) {
-	case I915_CONTEXT_PARAM_BAN_PERIOD:
-		ret = -EINVAL;
-		break;
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
 		if (args->size)
 			ret = -EINVAL;
@@ -1343,9 +1641,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 					I915_USER_PRIORITY(priority);
 		}
 		break;
+
 	case I915_CONTEXT_PARAM_SSEU:
 		ret = set_sseu(ctx, args);
 		break;
+
+	case I915_CONTEXT_PARAM_VM:
+		ret = set_ppgtt(ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7e79d1ffbaef1..edc6ba3f02888 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -148,6 +148,11 @@ void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
 i915_gem_context_create_gvt(struct drm_device *dev);
 
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file);
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file);
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b9e0e3a002239..736c845eb77f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1937,6 +1937,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
 	int err;
 
+	GEM_BUG_ON(ppgtt->base.vm.closed);
+
 	/*
 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
 	 * which will be pinned into every active context.
@@ -1975,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
 	i915_vma_unpin(ppgtt->vma);
 }
 
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+{
+	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+	if (!ppgtt->pin_count)
+		return;
+
+	ppgtt->pin_count = 0;
+	i915_vma_unpin(ppgtt->vma);
+}
+
 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 {
 	struct i915_ggtt * const ggtt = &i915->ggtt;
@@ -2082,12 +2095,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
 	return ppgtt;
 }
 
-void i915_ppgtt_close(struct i915_address_space *vm)
-{
-	GEM_BUG_ON(vm->closed);
-	vm->closed = true;
-}
-
 static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
 	struct list_head *phases[] = {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b76ab4c2a0e6f..83ded9fc761aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -396,6 +396,8 @@ struct i915_hw_ppgtt {
 		struct i915_page_directory_pointer pdp;	/* GEN8+ */
 		struct i915_page_directory pd;		/* GEN6-7 */
 	};
+
+	u32 user_handle;
 };
 
 struct gen6_hw_ppgtt {
@@ -605,13 +607,12 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
 
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-void i915_ppgtt_close(struct i915_address_space *vm);
 void i915_ppgtt_release(struct kref *kref);
 
-static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
-	if (ppgtt)
-		kref_get(&ppgtt->ref);
+	kref_get(&ppgtt->ref);
+	return ppgtt;
 }
 
 static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
@@ -622,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
 
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
 
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index c5c8ba6c059f1..90721b54e7aeb 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1732,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
 	err = i915_subtests(tests, ppgtt);
 
 out_close:
-	i915_ppgtt_close(&ppgtt->vm);
 	i915_ppgtt_put(ppgtt);
 
 out_unlock:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index fb6dc54ce7ff3..45f73b8b4e6d1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -373,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	return 0;
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+static noinline int cpu_check(struct drm_i915_gem_object *obj,
+			      unsigned int idx, unsigned int max)
 {
 	unsigned int n, m, needs_flush;
 	int err;
@@ -391,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
 
 		for (m = 0; m < max; m++) {
 			if (map[m] != m) {
-				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
-				       n, m, map[m], m);
+				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+				       __builtin_return_address(0), idx,
+				       n, real_page_count(obj), m, max,
+				       map[m], m);
 				err = -EINVAL;
 				goto out_unmap;
 			}
@@ -400,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
 
 		for (; m < DW_PER_PAGE; m++) {
 			if (map[m] != STACK_MAGIC) {
-				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
-				       n, m, map[m], STACK_MAGIC);
+				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+				       __builtin_return_address(0), idx, n, m,
+				       map[m], STACK_MAGIC);
 				err = -EINVAL;
 				goto out_unmap;
 			}
@@ -479,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct drm_i915_gem_object *obj = NULL;
-	unsigned long ncontexts, ndwords, dw;
-	struct igt_live_test t;
-	struct drm_file *file;
-	IGT_TIMEOUT(end_time);
-	LIST_HEAD(objects);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int err = -ENODEV;
 
 	/*
@@ -496,44 +496,167 @@ static int igt_ctx_exec(void *arg)
 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
 		return 0;
 
+	for_each_engine(engine, i915, id) {
+		struct drm_i915_gem_object *obj = NULL;
+		unsigned long ncontexts, ndwords, dw;
+		struct igt_live_test t;
+		struct drm_file *file;
+		IGT_TIMEOUT(end_time);
+		LIST_HEAD(objects);
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (!engine->context_size)
+			continue; /* No logical context support in HW */
+
+		file = mock_file(i915);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+
+		mutex_lock(&i915->drm.struct_mutex);
+
+		err = igt_live_test_begin(&t, i915, __func__, engine->name);
+		if (err)
+			goto out_unlock;
+
+		ncontexts = 0;
+		ndwords = 0;
+		dw = 0;
+		while (!time_after(jiffies, end_time)) {
+			struct i915_gem_context *ctx;
+			intel_wakeref_t wakeref;
+
+			ctx = live_context(i915, file);
+			if (IS_ERR(ctx)) {
+				err = PTR_ERR(ctx);
+				goto out_unlock;
+			}
+
+			if (!obj) {
+				obj = create_test_object(ctx, file, &objects);
+				if (IS_ERR(obj)) {
+					err = PTR_ERR(obj);
+					goto out_unlock;
+				}
+			}
+
+			with_intel_runtime_pm(i915, wakeref)
+				err = gpu_fill(obj, ctx, engine, dw);
+			if (err) {
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				       ndwords, dw, max_dwords(obj),
+				       engine->name, ctx->hw_id,
+				       yesno(!!ctx->ppgtt), err);
+				goto out_unlock;
+			}
+
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
+				dw = 0;
+			}
+
+			ndwords++;
+			ncontexts++;
+		}
+
+		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+			ncontexts, engine->name, ndwords);
+
+		ncontexts = dw = 0;
+		list_for_each_entry(obj, &objects, st_link) {
+			unsigned int rem =
+				min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+			err = cpu_check(obj, ncontexts++, rem);
+			if (err)
+				break;
+
+			dw += rem;
+		}
+
+out_unlock:
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mock_file_free(i915, file);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int igt_shared_ctx_exec(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *parent;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_live_test t;
+	struct drm_file *file;
+	int err = 0;
+
+	/*
+	 * Create a few different contexts with the same mm and write
+	 * through each ctx using the GPU making sure those writes end
+	 * up in the expected pages of our obj.
+	 */
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
 	file = mock_file(i915);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
 	mutex_lock(&i915->drm.struct_mutex);
 
+	parent = live_context(i915, file);
+	if (IS_ERR(parent)) {
+		err = PTR_ERR(parent);
+		goto out_unlock;
+	}
+
+	if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+		err = 0;
+		goto out_unlock;
+	}
+
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
 		goto out_unlock;
 
-	ncontexts = 0;
-	ndwords = 0;
-	dw = 0;
-	while (!time_after(jiffies, end_time)) {
-		struct intel_engine_cs *engine;
-		struct i915_gem_context *ctx;
-		unsigned int id;
+	for_each_engine(engine, i915, id) {
+		unsigned long ncontexts, ndwords, dw;
+		struct drm_i915_gem_object *obj = NULL;
+		IGT_TIMEOUT(end_time);
+		LIST_HEAD(objects);
 
-		ctx = live_context(i915, file);
-		if (IS_ERR(ctx)) {
-			err = PTR_ERR(ctx);
-			goto out_unlock;
-		}
+		if (!intel_engine_can_store_dword(engine))
+			continue;
 
-		for_each_engine(engine, i915, id) {
+		dw = 0;
+		ndwords = 0;
+		ncontexts = 0;
+		while (!time_after(jiffies, end_time)) {
+			struct i915_gem_context *ctx;
 			intel_wakeref_t wakeref;
 
-			if (!engine->context_size)
-				continue; /* No logical context support in HW */
+			ctx = kernel_context(i915);
+			if (IS_ERR(ctx)) {
+				err = PTR_ERR(ctx);
+				goto out_test;
+			}
 
-			if (!intel_engine_can_store_dword(engine))
-				continue;
+			__assign_ppgtt(ctx, parent->ppgtt);
 
 			if (!obj) {
-				obj = create_test_object(ctx, file, &objects);
+				obj = create_test_object(parent, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
-					goto out_unlock;
+					kernel_context_close(ctx);
+					goto out_test;
 				}
 			}
 
@@ -545,35 +668,39 @@ static int igt_ctx_exec(void *arg)
 				       ndwords, dw, max_dwords(obj),
 				       engine->name, ctx->hw_id,
 				       yesno(!!ctx->ppgtt), err);
-				goto out_unlock;
+				kernel_context_close(ctx);
+				goto out_test;
 			}
 
 			if (++dw == max_dwords(obj)) {
 				obj = NULL;
 				dw = 0;
 			}
+
 			ndwords++;
+			ncontexts++;
+
+			kernel_context_close(ctx);
 		}
-		ncontexts++;
-	}
-	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
-		ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);
+		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+			ncontexts, engine->name, ndwords);
 
-	dw = 0;
-	list_for_each_entry(obj, &objects, st_link) {
-		unsigned int rem =
-			min_t(unsigned int, ndwords - dw, max_dwords(obj));
+		ncontexts = dw = 0;
+		list_for_each_entry(obj, &objects, st_link) {
+			unsigned int rem =
+				min_t(unsigned int, ndwords - dw, max_dwords(obj));
 
-		err = cpu_check(obj, rem);
-		if (err)
-			break;
+			err = cpu_check(obj, ncontexts++, rem);
+			if (err)
+				goto out_test;
 
-		dw += rem;
+			dw += rem;
+		}
 	}
-
-out_unlock:
+out_test:
 	if (igt_live_test_end(&t))
 		err = -EIO;
+out_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	mock_file_free(i915, file);
@@ -1046,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
 	struct drm_i915_gem_object *obj = NULL;
 	struct i915_gem_context *ctx;
 	struct i915_hw_ppgtt *ppgtt;
-	unsigned long ndwords, dw;
+	unsigned long idx, ndwords, dw;
 	struct igt_live_test t;
 	struct drm_file *file;
 	I915_RND_STATE(prng);
@@ -1127,6 +1254,7 @@ static int igt_ctx_readonly(void *arg)
 		ndwords, RUNTIME_INFO(i915)->num_engines);
 
 	dw = 0;
+	idx = 0;
 	list_for_each_entry(obj, &objects, st_link) {
 		unsigned int rem =
 			min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1136,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
 		if (i915_gem_object_is_readonly(obj))
 			num_writes = 0;
 
-		err = cpu_check(obj, num_writes);
+		err = cpu_check(obj, idx++, num_writes);
 		if (err)
 			break;
 
@@ -1619,7 +1747,8 @@ static int mock_context_barrier(void *arg)
 	}
 
 	counter = 0;
-	err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, 0,
+				   NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1631,8 +1760,8 @@ static int mock_context_barrier(void *arg)
 	}
 
 	counter = 0;
-	err = context_barrier_task(ctx,
-				   ALL_ENGINES, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES,
+				   NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1655,8 +1784,8 @@ static int mock_context_barrier(void *arg)
 
 	counter = 0;
 	context_barrier_inject_fault = BIT(RCS0);
-	err = context_barrier_task(ctx,
-				   ALL_ENGINES, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES,
+				   NULL, mock_barrier_task, &counter);
 	context_barrier_inject_fault = 0;
 	if (err == -ENXIO)
 		err = 0;
@@ -1670,8 +1799,8 @@ static int mock_context_barrier(void *arg)
 		goto out;
 
 	counter = 0;
-	err = context_barrier_task(ctx,
-				   ALL_ENGINES, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES,
+				   NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1719,6 +1848,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
 		SUBTEST(igt_ctx_sseu),
+		SUBTEST(igt_shared_ctx_exec),
 		SUBTEST(igt_vm_isolation),
 	};
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 01084f6b4fb78..9cca66e4420a6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 
 	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-	i915_ppgtt_close(&ppgtt->vm);
 	i915_ppgtt_put(ppgtt);
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index bc14e9aec2748..81e5ace18b81e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -54,13 +54,17 @@ mock_context(struct drm_i915_private *i915,
 		goto err_handles;
 
 	if (name) {
+		struct i915_hw_ppgtt *ppgtt;
+
 		ctx->name = kstrdup(name, GFP_KERNEL);
 		if (!ctx->name)
 			goto err_put;
 
-		ctx->ppgtt = mock_ppgtt(i915, name);
-		if (!ctx->ppgtt)
+		ppgtt = mock_ppgtt(i915, name);
+		if (!ppgtt)
 			goto err_put;
+
+		__set_ppgtt(ctx, ppgtt);
 	}
 
 	return ctx;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 1c69ed16a923b..9af7a8e6a46ed 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -343,6 +343,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
 #define DRM_I915_QUERY			0x39
+#define DRM_I915_GEM_VM_CREATE		0x3a
+#define DRM_I915_GEM_VM_DESTROY		0x3b
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -402,6 +404,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
 #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1453,6 +1457,33 @@ struct drm_i915_gem_context_destroy {
 	__u32 pad;
 };
 
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is set up upon creation.
+ *
+ * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @vm_id.
+ *
+ * No flags are currently defined; all bits are reserved and must be zero.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @vm_id.
+ *
+ * No extensions or flags are currently allowed; they must be zero.
+ */
+struct drm_i915_gem_vm_control {
+	__u64 extensions;
+	__u32 flags;
+	__u32 vm_id;
+};
+
 struct drm_i915_reg_read {
 	/*
 	 * Register offset.
@@ -1542,7 +1573,19 @@ struct drm_i915_gem_context_param {
  * On creation, all new contexts are marked as recoverable.
  */
 #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
+
+	/*
+	 * The id of the associated virtual memory address space (ppGTT) of
+	 * this context. Can be retrieved and passed to another context
+	 * (on the same fd) for both to use the same ppGTT and so share
+	 * address layouts, and avoid reloading the page tables on context
+	 * switches between themselves.
+	 *
+	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+	 */
+#define I915_CONTEXT_PARAM_VM		0x9
 /* Must be kept compact -- no holes and well documented */
+
 	__u64 value;
 };
 

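For illustration, a minimal userspace sketch of the interface added above (not
part of the patch): share_vm() is a hypothetical helper, fd is assumed to be an
open render node, ctx_a/ctx_b two previously created context ids, and the
includes assume the kernel/libdrm uapi headers.

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/*
 * Create one ppGTT with VM_CREATE and point two existing contexts at it via
 * I915_CONTEXT_PARAM_VM, so both share a single address space. Error handling
 * is terse for brevity.
 */
static int share_vm(int fd, __u32 ctx_a, __u32 ctx_b)
{
	struct drm_i915_gem_vm_control vm = {};
	struct drm_i915_gem_context_param arg = {};

	/* New ppGTT bound to this fd; its id is returned in vm.vm_id. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm))
		return -errno;

	arg.param = I915_CONTEXT_PARAM_VM;
	arg.value = vm.vm_id;

	arg.ctx_id = ctx_a;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg))
		return -errno;

	arg.ctx_id = ctx_b;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg))
		return -errno;

	/* Each context now holds its own reference; the fd handle can go. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm) ? -errno : 0;
}
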
From b91715417244a7e478304425ba671bfafee9c43e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 22 Mar 2019 09:23:24 +0000
Subject: [PATCH 40/95] drm/i915: Extend CONTEXT_CREATE to set parameters upon
 construction

It can be useful to have a single ioctl to create a context with all
the initial parameters, instead of a series of create + setparam + setparam
ioctls. This extension to context creation allows any of the parameters
to be passed in as a linked list, to be applied to the newly constructed
context.

v2: Make a local copy of user setparam (Tvrtko)
v3: Use flags to detect availability of extension interface

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-3-chris@chris-wilson.co.uk
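
Illustrative only (not part of the patch): a userspace sketch of chaining a
single SETPARAM extension onto context creation, assuming the i915_user_extension
layout introduced earlier in the series (name/next_extension). The priority
value is just an example (lowering priority needs no extra capability), and fd
is assumed to be an open render node.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_low_prio_ctx(int fd, __u32 *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam prio = {
		.base  = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PRIORITY,
			.value = I915_CONTEXT_MIN_USER_PRIORITY,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		/* Single-entry chain: prio.base.next_extension stays 0. */
		.extensions = (uintptr_t)&prio,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return -errno;

	*ctx_id = create.ctx_id; /* context arrives with the priority applied */
	return 0;
}
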
---
 drivers/gpu/drm/i915/i915_drv.c         |   2 +-
 drivers/gpu/drm/i915/i915_gem_context.c | 452 +++++++++++++-----------
 include/uapi/drm/i915_drm.h             | 180 +++++-----
 3 files changed, 349 insertions(+), 285 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6403ba1d56a5..d3df0ebaa09c9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -3111,7 +3111,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d0a56c8d0bb9f..68f9c7c7bd6c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -1102,198 +1102,6 @@ static int set_ppgtt(struct i915_gem_context *ctx,
 	return err;
 }
 
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
-{
-	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-	struct drm_i915_gem_context_create *args = data;
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_gem_context *ctx;
-	int ret;
-
-	if (!DRIVER_CAPS(i915)->has_logical_contexts)
-		return -ENODEV;
-
-	if (args->pad != 0)
-		return -EINVAL;
-
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ret;
-
-	if (client_is_banned(file_priv)) {
-		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-			  current->comm,
-			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
-
-		return -EIO;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	ctx = i915_gem_create_context(i915);
-	mutex_unlock(&dev->struct_mutex);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ret = gem_context_register(ctx, file_priv);
-	if (ret < 0)
-		goto err_ctx;
-
-	args->ctx_id = ret;
-	DRM_DEBUG("HW context %d created\n", args->ctx_id);
-
-	return 0;
-
-err_ctx:
-	mutex_lock(&dev->struct_mutex);
-	context_close(ctx);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
-				   struct drm_file *file)
-{
-	struct drm_i915_gem_context_destroy *args = data;
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_gem_context *ctx;
-
-	if (args->pad != 0)
-		return -EINVAL;
-
-	if (!args->ctx_id)
-		return -ENOENT;
-
-	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
-		return -EINTR;
-
-	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
-	mutex_unlock(&file_priv->context_idr_lock);
-	if (!ctx)
-		return -ENOENT;
-
-	mutex_lock(&dev->struct_mutex);
-	context_close(ctx);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static int get_sseu(struct i915_gem_context *ctx,
-		    struct drm_i915_gem_context_param *args)
-{
-	struct drm_i915_gem_context_param_sseu user_sseu;
-	struct intel_engine_cs *engine;
-	struct intel_context *ce;
-
-	if (args->size == 0)
-		goto out;
-	else if (args->size < sizeof(user_sseu))
-		return -EINVAL;
-
-	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
-			   sizeof(user_sseu)))
-		return -EFAULT;
-
-	if (user_sseu.flags || user_sseu.rsvd)
-		return -EINVAL;
-
-	engine = intel_engine_lookup_user(ctx->i915,
-					  user_sseu.engine_class,
-					  user_sseu.engine_instance);
-	if (!engine)
-		return -EINVAL;
-
-	ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
-
-	user_sseu.slice_mask = ce->sseu.slice_mask;
-	user_sseu.subslice_mask = ce->sseu.subslice_mask;
-	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
-	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
-
-	intel_context_pin_unlock(ce);
-
-	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
-			 sizeof(user_sseu)))
-		return -EFAULT;
-
-out:
-	args->size = sizeof(user_sseu);
-
-	return 0;
-}
-
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file)
-{
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_gem_context_param *args = data;
-	struct i915_gem_context *ctx;
-	int ret = 0;
-
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (!ctx)
-		return -ENOENT;
-
-	switch (args->param) {
-	case I915_CONTEXT_PARAM_BAN_PERIOD:
-		ret = -EINVAL;
-		break;
-	case I915_CONTEXT_PARAM_NO_ZEROMAP:
-		args->size = 0;
-		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
-		break;
-	case I915_CONTEXT_PARAM_GTT_SIZE:
-		args->size = 0;
-
-		if (ctx->ppgtt)
-			args->value = ctx->ppgtt->vm.total;
-		else if (to_i915(dev)->mm.aliasing_ppgtt)
-			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
-		else
-			args->value = to_i915(dev)->ggtt.vm.total;
-		break;
-	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
-		args->size = 0;
-		args->value = i915_gem_context_no_error_capture(ctx);
-		break;
-	case I915_CONTEXT_PARAM_BANNABLE:
-		args->size = 0;
-		args->value = i915_gem_context_is_bannable(ctx);
-		break;
-	case I915_CONTEXT_PARAM_RECOVERABLE:
-		args->size = 0;
-		args->value = i915_gem_context_is_recoverable(ctx);
-		break;
-	case I915_CONTEXT_PARAM_PRIORITY:
-		args->size = 0;
-		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
-		break;
-	case I915_CONTEXT_PARAM_SSEU:
-		ret = get_sseu(ctx, args);
-		break;
-	case I915_CONTEXT_PARAM_VM:
-		ret = get_ppgtt(ctx, args);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	i915_gem_context_put(ctx);
-	return ret;
-}
-
 static int gen8_emit_rpcs_config(struct i915_request *rq,
 				 struct intel_context *ce,
 				 struct intel_sseu sseu)
@@ -1573,18 +1381,11 @@ static int set_sseu(struct i915_gem_context *ctx,
 	return 0;
 }
 
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file)
+static int ctx_setparam(struct i915_gem_context *ctx,
+			struct drm_i915_gem_context_param *args)
 {
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_gem_context_param *args = data;
-	struct i915_gem_context *ctx;
 	int ret = 0;
 
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (!ctx)
-		return -ENOENT;
-
 	switch (args->param) {
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
 		if (args->size)
@@ -1594,6 +1395,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 		break;
+
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 		if (args->size)
 			ret = -EINVAL;
@@ -1602,6 +1404,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			i915_gem_context_clear_no_error_capture(ctx);
 		break;
+
 	case I915_CONTEXT_PARAM_BANNABLE:
 		if (args->size)
 			ret = -EINVAL;
@@ -1628,7 +1431,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
 			if (args->size)
 				ret = -EINVAL;
-			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+			else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
 				ret = -ENODEV;
 			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
 				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
@@ -1656,6 +1459,251 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 
+	return ret;
+}
+
+struct create_ext {
+	struct i915_gem_context *ctx;
+	struct drm_i915_file_private *fpriv;
+};
+
+static int create_setparam(struct i915_user_extension __user *ext, void *data)
+{
+	struct drm_i915_gem_context_create_ext_setparam local;
+	const struct create_ext *arg = data;
+
+	if (copy_from_user(&local, ext, sizeof(local)))
+		return -EFAULT;
+
+	if (local.param.ctx_id)
+		return -EINVAL;
+
+	return ctx_setparam(arg->ctx, &local.param);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_context_create_ext *args = data;
+	struct create_ext ext_data;
+	int ret;
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return -ENODEV;
+
+	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
+		return -EINVAL;
+
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
+	ext_data.fpriv = file->driver_priv;
+	if (client_is_banned(ext_data.fpriv)) {
+		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+			  current->comm,
+			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
+		return -EIO;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ext_data.ctx = i915_gem_create_context(i915);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(ext_data.ctx))
+		return PTR_ERR(ext_data.ctx);
+
+	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
+		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+					   create_extensions,
+					   ARRAY_SIZE(create_extensions),
+					   &ext_data);
+		if (ret)
+			goto err_ctx;
+	}
+
+	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+	if (ret < 0)
+		goto err_ctx;
+
+	args->ctx_id = ret;
+	DRM_DEBUG("HW context %d created\n", args->ctx_id);
+
+	return 0;
+
+err_ctx:
+	mutex_lock(&dev->struct_mutex);
+	context_close(ext_data.ctx);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file)
+{
+	struct drm_i915_gem_context_destroy *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_gem_context *ctx;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (!args->ctx_id)
+		return -ENOENT;
+
+	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+		return -EINTR;
+
+	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+	mutex_unlock(&file_priv->context_idr_lock);
+	if (!ctx)
+		return -ENOENT;
+
+	mutex_lock(&dev->struct_mutex);
+	context_close(ctx);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int get_sseu(struct i915_gem_context *ctx,
+		    struct drm_i915_gem_context_param *args)
+{
+	struct drm_i915_gem_context_param_sseu user_sseu;
+	struct intel_engine_cs *engine;
+	struct intel_context *ce;
+
+	if (args->size == 0)
+		goto out;
+	else if (args->size < sizeof(user_sseu))
+		return -EINVAL;
+
+	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+			   sizeof(user_sseu)))
+		return -EFAULT;
+
+	if (user_sseu.flags || user_sseu.rsvd)
+		return -EINVAL;
+
+	engine = intel_engine_lookup_user(ctx->i915,
+					  user_sseu.engine_class,
+					  user_sseu.engine_instance);
+	if (!engine)
+		return -EINVAL;
+
+	ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	user_sseu.slice_mask = ce->sseu.slice_mask;
+	user_sseu.subslice_mask = ce->sseu.subslice_mask;
+	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+	intel_context_pin_unlock(ce);
+
+	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+			 sizeof(user_sseu)))
+		return -EFAULT;
+
+out:
+	args->size = sizeof(user_sseu);
+
+	return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct i915_gem_context *ctx;
+	int ret = 0;
+
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+	if (!ctx)
+		return -ENOENT;
+
+	switch (args->param) {
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->size = 0;
+		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+		break;
+
+	case I915_CONTEXT_PARAM_GTT_SIZE:
+		args->size = 0;
+		if (ctx->ppgtt)
+			args->value = ctx->ppgtt->vm.total;
+		else if (to_i915(dev)->mm.aliasing_ppgtt)
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+		else
+			args->value = to_i915(dev)->ggtt.vm.total;
+		break;
+
+	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+		args->size = 0;
+		args->value = i915_gem_context_no_error_capture(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_BANNABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_bannable(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_recoverable(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_PRIORITY:
+		args->size = 0;
+		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+		break;
+
+	case I915_CONTEXT_PARAM_SSEU:
+		ret = get_sseu(ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_VM:
+		ret = get_ppgtt(ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	i915_gem_context_put(ctx);
+	return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct i915_gem_context *ctx;
+	int ret;
+
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+	if (!ctx)
+		return -ENOENT;
+
+	ret = ctx_setparam(ctx, args);
+
 	i915_gem_context_put(ctx);
 	return ret;
 }
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 9af7a8e6a46ed..d45b79746fc49 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -394,6 +394,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
@@ -1447,92 +1448,17 @@ struct drm_i915_gem_wait {
 };
 
 struct drm_i915_gem_context_create {
-	/*  output: id of new context*/
-	__u32 ctx_id;
-	__u32 pad;
-};
-
-struct drm_i915_gem_context_destroy {
-	__u32 ctx_id;
-	__u32 pad;
-};
-
-/*
- * DRM_I915_GEM_VM_CREATE -
- *
- * Create a new virtual memory address space (ppGTT) for use within a context
- * on the same file. Extensions can be provided to configure exactly how the
- * address space is set up upon creation.
- *
- * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
- * returned in the outparam @vm_id.
- *
- * No flags are currently defined; all bits are reserved and must be zero.
- *
- * An extension chain may be provided, starting with @extensions, and terminated
- * by @next_extension being 0. Currently, no extensions are defined.
- *
- * DRM_I915_GEM_VM_DESTROY -
- *
- * Destroys a previously created VM id, specified in @vm_id.
- *
- * No extensions or flags are currently allowed; they must be zero.
- */
-struct drm_i915_gem_vm_control {
-	__u64 extensions;
-	__u32 flags;
-	__u32 vm_id;
-};
-
-struct drm_i915_reg_read {
-	/*
-	 * Register offset.
-	 * For 64bit wide registers where the upper 32bits don't immediately
-	 * follow the lower 32bits, the offset of the lower 32bits must
-	 * be specified
-	 */
-	__u64 offset;
-#define I915_REG_READ_8B_WA (1ul << 0)
-
-	__u64 val; /* Return value */
-};
-/* Known registers:
- *
- * Render engine timestamp - 0x2358 + 64bit - gen7+
- * - Note this register returns an invalid value if using the default
- *   single instruction 8byte read, in order to workaround that pass
- *   flag I915_REG_READ_8B_WA in offset field.
- *
- */
-
-struct drm_i915_reset_stats {
-	__u32 ctx_id;
-	__u32 flags;
-
-	/* All resets since boot/module reload, for all contexts */
-	__u32 reset_count;
-
-	/* Number of batches lost when active in GPU, for this context */
-	__u32 batch_active;
-
-	/* Number of batches lost pending for execution, for this context */
-	__u32 batch_pending;
-
+	__u32 ctx_id; /* output: id of new context*/
 	__u32 pad;
 };
 
-struct drm_i915_gem_userptr {
-	__u64 user_ptr;
-	__u64 user_size;
+struct drm_i915_gem_context_create_ext {
+	__u32 ctx_id; /* output: id of new context*/
 	__u32 flags;
-#define I915_USERPTR_READ_ONLY 0x1
-#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
-	/**
-	 * Returned handle for the object.
-	 *
-	 * Object handles are nonzero.
-	 */
-	__u32 handle;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+	(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+	__u64 extensions;
 };
 
 struct drm_i915_gem_context_param {
@@ -1648,6 +1574,96 @@ struct drm_i915_gem_context_param_sseu {
 	__u32 rsvd;
 };
 
+struct drm_i915_gem_context_create_ext_setparam {
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+	struct i915_user_extension base;
+	struct drm_i915_gem_context_param param;
+};
+
+struct drm_i915_gem_context_destroy {
+	__u32 ctx_id;
+	__u32 pad;
+};
+
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is set up upon creation.
+ *
+ * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @vm_id.
+ *
+ * No flags are currently defined; all bits are reserved and must be zero.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @vm_id.
+ *
+ * No extensions or flags are currently allowed; they must be zero.
+ */
+struct drm_i915_gem_vm_control {
+	__u64 extensions;
+	__u32 flags;
+	__u32 vm_id;
+};
+
+struct drm_i915_reg_read {
+	/*
+	 * Register offset.
+	 * For 64bit wide registers where the upper 32bits don't immediately
+	 * follow the lower 32bits, the offset of the lower 32bits must
+	 * be specified
+	 */
+	__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
+	__u64 val; /* Return value */
+};
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read, in order to workaround that pass
+ *   flag I915_REG_READ_8B_WA in offset field.
+ *
+ */
+
+struct drm_i915_reset_stats {
+	__u32 ctx_id;
+	__u32 flags;
+
+	/* All resets since boot/module reload, for all contexts */
+	__u32 reset_count;
+
+	/* Number of batches lost when active in GPU, for this context */
+	__u32 batch_active;
+
+	/* Number of batches lost pending for execution, for this context */
+	__u32 batch_pending;
+
+	__u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+	__u64 user_ptr;
+	__u64 user_size;
+	__u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+};
+
 enum drm_i915_oa_format {
 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
 	I915_OA_FORMAT_A29,	    /* HSW only */

From ea593dbba4c8ed841630fa5445202627e1046ba6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 22 Mar 2019 09:23:25 +0000
Subject: [PATCH 41/95] drm/i915: Allow contexts to share a single timeline
 across all engines

Previously, our view has always been to run the engines independently
within a context. (Multiple engines happened before we had contexts and
timelines, so they always operated independently and that behaviour
persisted into contexts.) However, at the user level the context often
represents a single timeline (e.g. GL contexts) and userspace must
ensure that the individual engines are serialised to present that
ordering to the client (or forgets about this detail entirely and hopes
no one notices - a fair ploy if the client can only directly control
one engine itself ;)

In the next patch, we will want to construct a set of engines that
operate as one, that have a single timeline interwoven between them, to
present a single virtual engine to the user. (They submit to the virtual
engine, then we decide which engine to execute on.)

To that end, we want to be able to create contexts which have a single
timeline (fence context) shared between all engines, rather than multiple
timelines.

v2: Move the specialised timeline ordering to its own function.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-4-chris@chris-wilson.co.uk
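
A short usage sketch (illustrative, not part of the patch; fd is assumed to be
an open render node, headers as in the earlier uapi changes). On hardware
without execlists the kernel rejects the flag with -EINVAL.

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_single_timeline_ctx(int fd, __u32 *ctx_id)
{
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return -errno;

	/* Requests on any engine of this context now execute in submission order. */
	*ctx_id = create.ctx_id;
	return 0;
}
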
---
 drivers/gpu/drm/i915/i915_gem_context.c       | 31 +++++--
 drivers/gpu/drm/i915/i915_gem_context_types.h |  2 +
 drivers/gpu/drm/i915/i915_request.c           | 80 +++++++++++++------
 drivers/gpu/drm/i915/i915_request.h           |  5 +-
 drivers/gpu/drm/i915/i915_sw_fence.c          | 39 +++++++--
 drivers/gpu/drm/i915/i915_sw_fence.h          | 13 ++-
 drivers/gpu/drm/i915/intel_lrc.c              |  5 +-
 drivers/gpu/drm/i915/selftests/mock_context.c |  2 +-
 include/uapi/drm/i915_drm.h                   |  3 +-
 9 files changed, 138 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 68f9c7c7bd6c8..39ca3cb6af666 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -238,6 +238,9 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
 		intel_context_put(it);
 
+	if (ctx->timeline)
+		i915_timeline_put(ctx->timeline);
+
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
@@ -403,12 +406,16 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 {
 	struct i915_gem_context *ctx;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
+	    !HAS_EXECLISTS(dev_priv))
+		return ERR_PTR(-EINVAL);
+
 	/* Reap the most stale context */
 	contexts_free_first(dev_priv);
 
@@ -431,6 +438,18 @@ i915_gem_create_context(struct drm_i915_private *dev_priv)
 		i915_ppgtt_put(ppgtt);
 	}
 
+	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+		struct i915_timeline *timeline;
+
+		timeline = i915_timeline_create(dev_priv, NULL);
+		if (IS_ERR(timeline)) {
+			context_close(ctx);
+			return ERR_CAST(timeline);
+		}
+
+		ctx->timeline = timeline;
+	}
+
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -459,7 +478,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (ret)
 		return ERR_PTR(ret);
 
-	ctx = i915_gem_create_context(to_i915(dev));
+	ctx = i915_gem_create_context(to_i915(dev), 0);
 	if (IS_ERR(ctx))
 		goto out;
 
@@ -495,7 +514,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	struct i915_gem_context *ctx;
 	int err;
 
-	ctx = i915_gem_create_context(i915);
+	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -658,7 +677,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_init_base(&file_priv->vm_idr, 1);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ctx = i915_gem_create_context(i915);
+	ctx = i915_gem_create_context(i915, 0);
 	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
@@ -800,7 +819,7 @@ last_request_on_engine(struct i915_timeline *timeline,
 
 	rq = i915_active_request_raw(&timeline->last_request,
 				     &engine->i915->drm.struct_mutex);
-	if (rq && rq->engine == engine) {
+	if (rq && rq->engine->mask & engine->mask) {
 		GEM_TRACE("last request on engine %s: %llx:%llu\n",
 			  engine->name, rq->fence.context, rq->fence.seqno);
 		GEM_BUG_ON(rq->timeline != timeline);
@@ -1520,7 +1539,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ext_data.ctx = i915_gem_create_context(i915);
+	ext_data.ctx = i915_gem_create_context(i915, args->flags);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ext_data.ctx))
 		return PTR_ERR(ext_data.ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
index 63ae8eb219393..e2ec58b10fb28 100644
--- a/drivers/gpu/drm/i915/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -41,6 +41,8 @@ struct i915_gem_context {
 	/** file_priv: owning file descriptor */
 	struct drm_i915_file_private *file_priv;
 
+	struct i915_timeline *timeline;
+
 	/**
 	 * @ppgtt: unique address space (GTT)
 	 *
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1529824d7c613..e9c2094ab8eaf 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -992,6 +992,60 @@ void i915_request_skip(struct i915_request *rq, int error)
 	memset(vaddr + head, 0, rq->postfix - head);
 }
 
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+	struct i915_timeline *timeline = rq->timeline;
+	struct i915_request *prev;
+
+	/*
+	 * Dependency tracking and request ordering along the timeline
+	 * is special cased so that we can eliminate redundant ordering
+	 * operations while building the request (we know that the timeline
+	 * itself is ordered, and here we guarantee it).
+	 *
+	 * As we know we will need to emit tracking along the timeline,
+	 * we embed the hooks into our request struct -- at the cost of
+	 * having to have specialised no-allocation interfaces (which will
+	 * be beneficial elsewhere).
+	 *
+	 * A second benefit to open-coding i915_request_await_request is
+	 * that we can apply a slight variant of the rules specialised
+	 * for timelines that jump between engines (such as virtual engines).
+	 * If we consider the case of a virtual engine, we must emit a dma-fence
+	 * to prevent scheduling of the second request until the first is
+	 * complete (to maximise our greedy late load balancing) and this
+	 * precludes optimising to use semaphore serialisation of a single
+	 * timeline across engines.
+	 */
+	prev = i915_active_request_raw(&timeline->last_request,
+				       &rq->i915->drm.struct_mutex);
+	if (prev && !i915_request_completed(prev)) {
+		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+			i915_sw_fence_await_sw_fence(&rq->submit,
+						     &prev->submit,
+						     &rq->submitq);
+		else
+			__i915_sw_fence_await_dma_fence(&rq->submit,
+							&prev->fence,
+							&rq->dmaq);
+		if (rq->engine->schedule)
+			__i915_sched_node_add_dependency(&rq->sched,
+							 &prev->sched,
+							 &rq->dep,
+							 0);
+	}
+
+	spin_lock_irq(&timeline->lock);
+	list_add_tail(&rq->link, &timeline->requests);
+	spin_unlock_irq(&timeline->lock);
+
+	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
+	__i915_active_request_set(&timeline->last_request, rq);
+
+	return prev;
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
@@ -1036,31 +1090,7 @@ void i915_request_add(struct i915_request *request)
 	GEM_BUG_ON(IS_ERR(cs));
 	request->postfix = intel_ring_offset(request, cs);
 
-	/*
-	 * Seal the request and mark it as pending execution. Note that
-	 * we may inspect this state, without holding any locks, during
-	 * hangcheck. Hence we apply the barrier to ensure that we do not
-	 * see a more recent value in the hws than we are tracking.
-	 */
-
-	prev = i915_active_request_raw(&timeline->last_request,
-				       &request->i915->drm.struct_mutex);
-	if (prev && !i915_request_completed(prev)) {
-		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
-					     &request->submitq);
-		if (engine->schedule)
-			__i915_sched_node_add_dependency(&request->sched,
-							 &prev->sched,
-							 &request->dep,
-							 0);
-	}
-
-	spin_lock_irq(&timeline->lock);
-	list_add_tail(&request->link, &timeline->requests);
-	spin_unlock_irq(&timeline->lock);
-
-	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
-	__i915_active_request_set(&timeline->last_request, request);
+	prev = __i915_request_add_to_timeline(request);
 
 	list_add_tail(&request->ring_link, &ring->request_list);
 	if (list_is_first(&request->ring_link, &ring->request_list))
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8c8fa50106440..cd6c130964cda 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -128,7 +128,10 @@ struct i915_request {
 	 * It is used by the driver to then queue the request for execution.
 	 */
 	struct i915_sw_fence submit;
-	wait_queue_entry_t submitq;
+	union {
+		wait_queue_entry_t submitq;
+		struct i915_sw_dma_fence_cb dmaq;
+	};
 	struct list_head execute_cb;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 8d1400d378d7a..5387aafd3424b 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
 }
 
-struct i915_sw_dma_fence_cb {
-	struct dma_fence_cb base;
-	struct i915_sw_fence *fence;
-};
-
 struct i915_sw_dma_fence_cb_timer {
 	struct i915_sw_dma_fence_cb base;
 	struct dma_fence *dma;
@@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 	return ret;
 }
 
+static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
+				     struct dma_fence_cb *data)
+{
+	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+	i915_sw_fence_complete(cb->fence);
+}
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				    struct dma_fence *dma,
+				    struct i915_sw_dma_fence_cb *cb)
+{
+	int ret;
+
+	debug_fence_assert(fence);
+
+	if (dma_fence_is_signaled(dma))
+		return 0;
+
+	cb->fence = fence;
+	i915_sw_fence_await(fence);
+
+	ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
+	if (ret == 0) {
+		ret = 1;
+	} else {
+		i915_sw_fence_complete(fence);
+		if (ret == -ENOENT) /* fence already signaled */
+			ret = 0;
+	}
+
+	return ret;
+}
+
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
 				    const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 6dec9e1d11029..9cb5c3b307a62 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -9,14 +9,13 @@
 #ifndef _I915_SW_FENCE_H_
 #define _I915_SW_FENCE_H_
 
+#include <linux/dma-fence.h>
 #include <linux/gfp.h>
 #include <linux/kref.h>
 #include <linux/notifier.h> /* for NOTIFY_DONE */
 #include <linux/wait.h>
 
 struct completion;
-struct dma_fence;
-struct dma_fence_ops;
 struct reservation_object;
 
 struct i915_sw_fence {
@@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 				     struct i915_sw_fence *after,
 				     gfp_t gfp);
+
+struct i915_sw_dma_fence_cb {
+	struct dma_fence_cb base;
+	struct i915_sw_fence *fence;
+};
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				    struct dma_fence *dma,
+				    struct i915_sw_dma_fence_cb *cb);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp);
+
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
 				    const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 35f7ef9e75c8d..66bc3cd4e1669 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2802,7 +2802,10 @@ populate_lr_context(struct intel_context *ce,
 
 static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
 {
-	return i915_timeline_create(ctx->i915, NULL);
+	if (ctx->timeline)
+		return i915_timeline_get(ctx->timeline);
+	else
+		return i915_timeline_create(ctx->i915, NULL);
 }
 
 static int execlists_context_deferred_alloc(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 81e5ace18b81e..0426093bf1d9f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -97,7 +97,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	ctx = i915_gem_create_context(i915);
+	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d45b79746fc49..9999f7d6a5a9b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1456,8 +1456,9 @@ struct drm_i915_gem_context_create_ext {
 	__u32 ctx_id; /* output: id of new context*/
 	__u32 flags;
 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
-	(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
 	__u64 extensions;
 };
 

From 6086e47b35e1a25df31842a49971c277cd84af8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 19:51:28 +0200
Subject: [PATCH 42/95] drm/i915: Really calculate the cursor ddb based on the
 highest enabled wm level
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I added the loop but neglected to actually pass the level to the
function. So we were just looping 8 times calculating the exact
same thing every time.

Fixes: df331de3f8aa ("drm/i915: Allocate enough DDB for the cursor")
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321175128.32178-1-ville.syrjala@linux.intel.com
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fcd3baff8b65c..eaf0793ebf607 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3953,7 +3953,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
 	WARN_ON(ret);
 
 	for (level = 0; level <= max_level; level++) {
-		skl_compute_plane_wm(crtc_state, 7, &wp, &wm, &wm);
+		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
 		if (wm.min_ddb_alloc == U16_MAX)
 			break;
 

From 0dc927eb9496ca5dfcde476f00c23b2283f8a5e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:41 +0200
Subject: [PATCH 43/95] drm/i915: Refactor EDID fixed mode search
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Both LVDS and eDP have the same code to look up the preferred mode
from the connector probed_modes list. Move the code to a common
location.
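
A caller-side sketch of the new helper's contract (names taken from the
diff below): it returns a fresh drm_mode_duplicate() copy of the
preferred EDID mode, owned by the caller, or NULL when no preferred
mode exists.

    fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
    if (fixed_mode)
        downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
    /* fixed_mode == NULL: fall back to VBT or other sources */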

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-1-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c    | 13 +++----------
 drivers/gpu/drm/i915/intel_drv.h   |  2 ++
 drivers/gpu/drm/i915/intel_lvds.c  | 14 +++-----------
 drivers/gpu/drm/i915/intel_panel.c | 27 +++++++++++++++++++++++++++
 4 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7c043e8f6298f..5096e99ffaadc 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -7057,7 +7057,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	struct drm_display_mode *fixed_mode = NULL;
 	struct drm_display_mode *downclock_mode = NULL;
 	bool has_dpcd;
-	struct drm_display_mode *scan;
 	enum pipe pipe = INVALID_PIPE;
 	intel_wakeref_t wakeref;
 	struct edid *edid;
@@ -7110,15 +7109,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	}
 	intel_connector->edid = edid;
 
-	/* prefer fixed mode from EDID if available */
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
-			fixed_mode = drm_mode_duplicate(dev, scan);
-			downclock_mode = intel_dp_drrs_init(
-						intel_connector, fixed_mode);
-			break;
-		}
-	}
+	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+	if (fixed_mode)
+		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
 
 	/* fallback to VBT if available for eDP */
 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0ffa083a7af7e..30cb9f39c7a29 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2157,6 +2157,8 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 				struct drm_i915_private *dev_priv,
 				struct drm_display_mode *fixed_mode,
 				struct drm_connector *connector);
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector);
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 845792aa0abee..0686ad1f12df7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -810,7 +810,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
-	struct drm_display_mode *scan; /* *modes, *bios_mode; */
 	struct drm_display_mode *fixed_mode = NULL;
 	struct drm_display_mode *downclock_mode = NULL;
 	struct edid *edid;
@@ -949,16 +948,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	}
 	intel_connector->edid = edid;
 
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-			DRM_DEBUG_KMS("using preferred mode from EDID: ");
-			drm_mode_debug_printmodeline(scan);
-
-			fixed_mode = drm_mode_duplicate(dev, scan);
-			if (fixed_mode)
-				goto out;
-		}
-	}
+	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+	if (fixed_mode)
+		goto out;
 
 	/* Failed to get EDID, what about VBT? */
 	if (dev_priv->vbt.lfp_lvds_vbt_mode) {
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index edd5540639b0e..f42137512010e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -99,6 +99,33 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
 		return NULL;
 }
 
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	const struct drm_display_mode *scan;
+
+	/* prefer fixed mode from EDID if available */
+	list_for_each_entry(scan, &connector->base.probed_modes, head) {
+		struct drm_display_mode *fixed_mode;
+
+		if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
+			continue;
+
+		fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+		if (!fixed_mode)
+			return NULL;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+			      connector->base.base.id, connector->base.name);
+		drm_mode_debug_printmodeline(fixed_mode);
+
+		return fixed_mode;
+	}
+
+	return NULL;
+}
+
 /* adjusted_mode has been preset to be the panel's fixed mode */
 void
 intel_pch_panel_fitting(struct intel_crtc *intel_crtc,

From 8f49673ef919e828cf66c23d9c73c1d50e56610a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:42 +0200
Subject: [PATCH 44/95] drm/i915: Pick the first mode from EDID as the fixed
 mode when there is no preferred mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Some monitors apparently forget to mark any mode as preferred in the
EDID. In this particular case we have a very generic-looking ID
("PNP Model 0 Serial Number 4" / "LVDS 800x600"), so a specific quirk
doesn't seem particularly wise. Also, the quirk we have
(EDID_QUIRK_FIRST_DETAILED_PREFERRED) is actually defunct, so we'd
have to fix it first.

When there is no preferred mode we currently fall back to the VBT.
That approach fails us here, as the VBT mode is 1024x768 whereas
the panel resolution is 800x600. So instead of falling back to the
VBT when there is no preferred mode, let's just pick the first
probed mode. Only if the EDID provided no modes at all do we fall
back to the VBT.

For this machine the VBIOS would appear to select the 800x600
60Hz EST mode rather than the first detailed mode (which is what
the new fallback will pick). The two modes differ only by
having opposite sync polarities, which does not seem to matter
to the panel in question.

v2: Make sure the probed_modes list is not empty
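
As a rough caller-side sketch of the resulting lookup order (the eDP
code shown here is from the previous patch; the VBT fallback is still
open-coded at this point in the series):

    /* 1) preferred EDID mode, 2) otherwise the first probed EDID mode */
    fixed_mode = intel_panel_edid_fixed_mode(intel_connector);

    /* 3) only when the EDID provided no modes at all, fall back to VBT */
    if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode)
        fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);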

Cc: Adam Jackson <ajax@redhat.com>
Cc: Roberto Viola <cagnulein@gmail.com>
Tested-by: Roberto Viola <cagnulein@gmail.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109780
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-2-ville.syrjala@linux.intel.com
Reviewed-by: Adam Jackson <ajax@redhat.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_panel.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f42137512010e..b9ee18c2dff5b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -104,11 +104,13 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	const struct drm_display_mode *scan;
+	struct drm_display_mode *fixed_mode;
+
+	if (list_empty(&connector->base.probed_modes))
+		return NULL;
 
 	/* prefer fixed mode from EDID if available */
 	list_for_each_entry(scan, &connector->base.probed_modes, head) {
-		struct drm_display_mode *fixed_mode;
-
 		if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
 			continue;
 
@@ -123,7 +125,20 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
 		return fixed_mode;
 	}
 
-	return NULL;
+	scan = list_first_entry(&connector->base.probed_modes,
+				typeof(*scan), head);
+
+	fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+	if (!fixed_mode)
+		return NULL;
+
+	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
+		      connector->base.base.id, connector->base.name);
+	drm_mode_debug_printmodeline(fixed_mode);
+
+	return fixed_mode;
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */

From 325710d3d4a3474d3cdad404c84056f7e98353b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:43 +0200
Subject: [PATCH 45/95] drm/i915: Refactor VBT fixed mode handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

LVDS and eDP have essentially the same code for grabbing the
fixed mode from VBT. Pull that code to a common location.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-3-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c    | 11 ++---------
 drivers/gpu/drm/i915/intel_drv.h   |  2 ++
 drivers/gpu/drm/i915/intel_lvds.c  | 15 +++------------
 drivers/gpu/drm/i915/intel_panel.c | 27 +++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5096e99ffaadc..57d769bca3b84 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -7114,15 +7114,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
 
 	/* fallback to VBT if available for eDP */
-	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
-		fixed_mode = drm_mode_duplicate(dev,
-					dev_priv->vbt.lfp_lvds_vbt_mode);
-		if (fixed_mode) {
-			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
-			connector->display_info.width_mm = fixed_mode->width_mm;
-			connector->display_info.height_mm = fixed_mode->height_mm;
-		}
-	}
+	if (!fixed_mode)
+		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 30cb9f39c7a29..5f9764835487d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2159,6 +2159,8 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 				struct drm_connector *connector);
 struct drm_display_mode *
 intel_panel_edid_fixed_mode(struct intel_connector *connector);
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector);
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0686ad1f12df7..ccd5ac6d3fcd5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -953,18 +953,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 		goto out;
 
 	/* Failed to get EDID, what about VBT? */
-	if (dev_priv->vbt.lfp_lvds_vbt_mode) {
-		DRM_DEBUG_KMS("using mode from VBT: ");
-		drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
-
-		fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
-		if (fixed_mode) {
-			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
-			connector->display_info.width_mm = fixed_mode->width_mm;
-			connector->display_info.height_mm = fixed_mode->height_mm;
-			goto out;
-		}
-	}
+	fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+	if (fixed_mode)
+		goto out;
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index b9ee18c2dff5b..cf111eba1a935 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -141,6 +141,33 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
 	return fixed_mode;
 }
 
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct drm_display_info *info = &connector->base.display_info;
+	struct drm_display_mode *fixed_mode;
+
+	if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+		return NULL;
+
+	fixed_mode = drm_mode_duplicate(&dev_priv->drm,
+					dev_priv->vbt.lfp_lvds_vbt_mode);
+	if (!fixed_mode)
+		return NULL;
+
+	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
+		      connector->base.base.id, connector->base.name);
+	drm_mode_debug_printmodeline(fixed_mode);
+
+	info->width_mm = fixed_mode->width_mm;
+	info->height_mm = fixed_mode->height_mm;
+
+	return fixed_mode;
+}
+
 /* adjusted_mode has been preset to be the panel's fixed mode */
 void
 intel_pch_panel_fitting(struct intel_crtc *intel_crtc,

From dee2370ce3d1541cf959f1d96cef08d05eaef693 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:44 +0200
Subject: [PATCH 46/95] drm/i915: Adjust DSI fixed mode handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DSI has its own convoluted way of grabbing the fixed mode from
the VBT. Change it to follow the path laid out by LVDS/eDP.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-4-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/icl_dsi.c       | 14 ++------------
 drivers/gpu/drm/i915/intel_dsi.h     |  1 -
 drivers/gpu/drm/i915/intel_dsi_vbt.c | 18 ------------------
 drivers/gpu/drm/i915/vlv_dsi.c       | 13 ++-----------
 4 files changed, 4 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index beb30d9a855c8..92440ff48f932 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -1361,7 +1361,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	struct intel_encoder *encoder;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
-	struct drm_display_mode *scan, *fixed_mode = NULL;
+	struct drm_display_mode *fixed_mode;
 	enum port port;
 
 	if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1411,15 +1411,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	/* attach connector to encoder */
 	intel_connector_attach_encoder(intel_connector, encoder);
 
-	/* fill mode info from VBT */
 	mutex_lock(&dev->mode_config.mutex);
-	intel_dsi_vbt_get_modes(intel_dsi);
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-			fixed_mode = drm_mode_duplicate(dev, scan);
-			break;
-		}
-	}
+	fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	if (!fixed_mode) {
@@ -1427,12 +1420,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 		goto err;
 	}
 
-	connector->display_info.width_mm = fixed_mode->width_mm;
-	connector->display_info.height_mm = fixed_mode->height_mm;
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
-
 	if (dev_priv->vbt.dsi.config->dual_link)
 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
 	else
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index a9a19778dc7fc..705a609050c06 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
 
 /* intel_dsi_vbt.c */
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
 void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index d1e00e4c77266..3074448446bc8 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
 	msleep(msec);
 }
 
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
-{
-	struct intel_connector *connector = intel_dsi->attached_connector;
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
-	if (!mode)
-		return 0;
-
-	mode->type |= DRM_MODE_TYPE_PREFERRED;
-
-	drm_mode_probed_add(&connector->base, mode);
-
-	return 1;
-}
-
 #define ICL_PREPARE_CNT_MAX	0x7
 #define ICL_CLK_ZERO_CNT_MAX	0xf
 #define ICL_TRAIL_CNT_MAX	0x7
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 6403728fe7784..01278b2e257c6 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -1658,7 +1658,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
-	struct drm_display_mode *scan, *fixed_mode = NULL;
+	struct drm_display_mode *fixed_mode;
 	enum port port;
 
 	DRM_DEBUG_KMS("\n");
@@ -1769,13 +1769,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
 	mutex_lock(&dev->mode_config.mutex);
-	intel_dsi_vbt_get_modes(intel_dsi);
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
-			fixed_mode = drm_mode_duplicate(dev, scan);
-			break;
-		}
-	}
+	fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	if (!fixed_mode) {
@@ -1783,9 +1777,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 		goto err;
 	}
 
-	connector->display_info.width_mm = fixed_mode->width_mm;
-	connector->display_info.height_mm = fixed_mode->height_mm;
-
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 	intel_panel_setup_backlight(connector, INVALID_PIPE);
 

From 9f6fbe22be90b73ea98a71ecca3599bac5189734 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:45 +0200
Subject: [PATCH 47/95] drm/i915: Stop hand rolling drm_mode_match()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Utilize drm_mode_match() instead of hand rolling it when
looking for the DRRS downclock mode.
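
One detail worth calling out, as a sketch of the helper added below:
DRM_MODE_MATCH_CLOCK is deliberately left out of the match mask, so two
modes that differ only in pixel clock still count as a match, and the
explicit clock comparison is what turns a match into a downclock
candidate.

    /* match timings and flags, but not the pixel clock */
    bool match = drm_mode_match(downclock_mode, fixed_mode,
                                DRM_MODE_MATCH_TIMINGS |
                                DRM_MODE_MATCH_FLAGS |
                                DRM_MODE_MATCH_3D_FLAGS);
    bool is_downclock = match && downclock_mode->clock < fixed_mode->clock;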

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-5-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_panel.c | 49 +++++++++++++++---------------
 1 file changed, 24 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cf111eba1a935..4727e74f74376 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -46,6 +46,16 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
+static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
+			      const struct drm_display_mode *fixed_mode)
+{
+	return drm_mode_match(downclock_mode, fixed_mode,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS) &&
+		downclock_mode->clock < fixed_mode->clock;
+}
+
 /**
  * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
  * @dev_priv: i915 device instance
@@ -60,11 +70,8 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
 			struct drm_display_mode *fixed_mode,
 			struct drm_connector *connector)
 {
-	struct drm_display_mode *scan, *tmp_mode;
-	int temp_downclock;
-
-	temp_downclock = fixed_mode->clock;
-	tmp_mode = NULL;
+	const struct drm_display_mode *scan, *best_mode = NULL;
+	int best_clock = fixed_mode->clock;
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		/*
@@ -74,29 +81,21 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
 		 * case we can set the different FPx0/1 to dynamically select
 		 * between low and high frequency.
 		 */
-		if (scan->hdisplay == fixed_mode->hdisplay &&
-		    scan->hsync_start == fixed_mode->hsync_start &&
-		    scan->hsync_end == fixed_mode->hsync_end &&
-		    scan->htotal == fixed_mode->htotal &&
-		    scan->vdisplay == fixed_mode->vdisplay &&
-		    scan->vsync_start == fixed_mode->vsync_start &&
-		    scan->vsync_end == fixed_mode->vsync_end &&
-		    scan->vtotal == fixed_mode->vtotal) {
-			if (scan->clock < temp_downclock) {
-				/*
-				 * The downclock is already found. But we
-				 * expect to find the lower downclock.
-				 */
-				temp_downclock = scan->clock;
-				tmp_mode = scan;
-			}
+		if (is_downclock_mode(scan, fixed_mode) &&
+		    scan->clock < best_clock) {
+			/*
+			 * The downclock is already found. But we
+			 * expect to find the lower downclock.
+			 */
+			best_clock = scan->clock;
+			best_mode = scan;
 		}
 	}
 
-	if (temp_downclock < fixed_mode->clock)
-		return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
-	else
-		return NULL;
+	if (best_mode)
+		return drm_mode_duplicate(&dev_priv->drm, best_mode);
+
+	return NULL;
 }
 
 struct drm_display_mode *

From abf1aae8259e7f24dc88ebba4a5df1f8fd72f7c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 21 Mar 2019 15:24:46 +0200
Subject: [PATCH 48/95] drm/i915: Clean up EDID downclock mode lookup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename intel_find_panel_downclock() to intel_panel_edid_downclock_mode()
to make it clear that it's looking for the downclock mode in the EDID.
And while at it, polish the implementation a bit as well.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321132446.22394-6-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c    |  4 +---
 drivers/gpu/drm/i915/intel_drv.h   |  7 +++----
 drivers/gpu/drm/i915/intel_panel.c | 32 +++++++++++++++---------------
 3 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 57d769bca3b84..326de12c3f44a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -7033,9 +7033,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
 		return NULL;
 	}
 
-	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
-						    &connector->base);
-
+	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
 	if (!downclock_mode) {
 		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
 		return NULL;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5f9764835487d..14765a4b31a94 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2153,10 +2153,9 @@ void intel_panel_update_backlight(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-extern struct drm_display_mode *intel_find_panel_downclock(
-				struct drm_i915_private *dev_priv,
-				struct drm_display_mode *fixed_mode,
-				struct drm_connector *connector);
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+				const struct drm_display_mode *fixed_mode);
 struct drm_display_mode *
 intel_panel_edid_fixed_mode(struct intel_connector *connector);
 struct drm_display_mode *
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4727e74f74376..47cd4a338db60 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -56,24 +56,16 @@ static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
 		downclock_mode->clock < fixed_mode->clock;
 }
 
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev_priv: i915 device instance
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
 struct drm_display_mode *
-intel_find_panel_downclock(struct drm_i915_private *dev_priv,
-			struct drm_display_mode *fixed_mode,
-			struct drm_connector *connector)
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+				const struct drm_display_mode *fixed_mode)
 {
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	const struct drm_display_mode *scan, *best_mode = NULL;
+	struct drm_display_mode *downclock_mode;
 	int best_clock = fixed_mode->clock;
 
-	list_for_each_entry(scan, &connector->probed_modes, head) {
+	list_for_each_entry(scan, &connector->base.probed_modes, head) {
 		/*
 		 * If one mode has the same resolution with the fixed_panel
 		 * mode while they have the different refresh rate, it means
@@ -92,10 +84,18 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	if (best_mode)
-		return drm_mode_duplicate(&dev_priv->drm, best_mode);
+	if (!best_mode)
+		return NULL;
+
+	downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
+	if (!downclock_mode)
+		return NULL;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+		      connector->base.base.id, connector->base.name);
+	drm_mode_debug_printmodeline(downclock_mode);
 
-	return NULL;
+	return downclock_mode;
 }
 
 struct drm_display_mode *

From 29f3863d33d1e2ad6c87fe64a61538041bd965bd Mon Sep 17 00:00:00 2001
From: James Ausmus <james.ausmus@intel.com>
Date: Fri, 22 Mar 2019 10:58:42 -0700
Subject: [PATCH 49/95] drm/i915/ehl: Add EHL platform info and PCI IDs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add known EHL PCI IDs.

v2 (Rodrigo): Removed x86 early quirk. To be sent in a separate
              patch cc'ing the appropriate list and maintainers for
              proper ack.
v3 (Rodrigo): - Removed .num_pipes = 3 since it already comes from
                GEN11_FEATURES.
              - Added ppgtt type and size after the rework from Bob and Chris.
v4 (Rodrigo): - Removed the ppgtt type added in v3. Jose pointed out it
                is not needed.

Cc: Bob Paauwe <bob.j.paauwe@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Bob Paauwe <bob.j.paauwe@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_pci.c | 9 +++++++++
 include/drm/i915_pciids.h       | 7 +++++++
 2 files changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 7a6054eadb8e9..fa99d68cbe7d5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -730,6 +730,14 @@ static const struct intel_device_info intel_icelake_11_info = {
 		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 };
 
+static const struct intel_device_info intel_elkhartlake_info = {
+	GEN11_FEATURES,
+	PLATFORM(INTEL_ICELAKE),
+	.is_alpha_support = 1,
+	.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+	.ppgtt_size = 36,
+};
+
 #undef GEN
 #undef PLATFORM
 
@@ -799,6 +807,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CNL_IDS(&intel_cannonlake_info),
 	INTEL_ICL_11_IDS(&intel_icelake_11_info),
+	INTEL_EHL_IDS(&intel_elkhartlake_info),
 	{0, 0, 0}
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 291b5e3fa59ce..c7cdbfc4d033b 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -498,4 +498,11 @@
 	INTEL_VGA_DEVICE(0x8A70, info), \
 	INTEL_VGA_DEVICE(0x8A53, info)
 
+/* EHL */
+#define INTEL_EHL_IDS(info) \
+	INTEL_VGA_DEVICE(0x4500, info),	\
+	INTEL_VGA_DEVICE(0x4571, info), \
+	INTEL_VGA_DEVICE(0x4551, info), \
+	INTEL_VGA_DEVICE(0x4541, info)
+
 #endif /* _I915_PCIIDS_H */

From 897f296152c704ef64163c49d94a6e086de1c31a Mon Sep 17 00:00:00 2001
From: Bob Paauwe <bob.j.paauwe@intel.com>
Date: Fri, 22 Mar 2019 10:58:43 -0700
Subject: [PATCH 50/95] drm/i915/ehl: Add ElkhartLake platform
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add ElkhartLake as a unique platform as there are some differences
between it and Icelake.

Signed-off-by: Bob Paauwe <bob.j.paauwe@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-2-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h          | 1 +
 drivers/gpu/drm/i915/i915_pci.c          | 2 +-
 drivers/gpu/drm/i915/intel_device_info.c | 1 +
 drivers/gpu/drm/i915/intel_device_info.h | 1 +
 4 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f723b15527f80..eff0e9aa353db 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2323,6 +2323,7 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
+#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
 #define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index fa99d68cbe7d5..a7e1611af26d4 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -732,7 +732,7 @@ static const struct intel_device_info intel_icelake_11_info = {
 
 static const struct intel_device_info intel_elkhartlake_info = {
 	GEN11_FEATURES,
-	PLATFORM(INTEL_ICELAKE),
+	PLATFORM(INTEL_ELKHARTLAKE),
 	.is_alpha_support = 1,
 	.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
 	.ppgtt_size = 36,
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index eddf83807957e..db00110cbb2e1 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -57,6 +57,7 @@ static const char * const platform_names[] = {
 	PLATFORM_NAME(COFFEELAKE),
 	PLATFORM_NAME(CANNONLAKE),
 	PLATFORM_NAME(ICELAKE),
+	PLATFORM_NAME(ELKHARTLAKE),
 };
 #undef PLATFORM_NAME
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6234570a9b179..98acefaacec91 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -73,6 +73,7 @@ enum intel_platform {
 	INTEL_CANNONLAKE,
 	/* gen11 */
 	INTEL_ICELAKE,
+	INTEL_ELKHARTLAKE,
 	INTEL_MAX_PLATFORMS
 };
 

From e547f2a2fc1349b97117d6cd7f157220063e083e Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 10:58:44 -0700
Subject: [PATCH 51/95] drm/i915/ehl: Add dpll mgr
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Elkhart Lake has a different set of PLLs as compared to Ice Lake,
although programming them is very similar.

v2: Rebase on top of s/icl_pll_funcs/combo_pll_funcs

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-3-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index dfe6a7114d563..eeb659946203c 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -3245,6 +3245,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
 	.dump_hw_state = icl_dump_hw_state,
 };
 
+static const struct dpll_info ehl_plls[] = {
+	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+	{ },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+	.dpll_info = ehl_plls,
+	.get_dpll = icl_get_dpll,
+	.dump_hw_state = icl_dump_hw_state,
+};
+
 /**
  * intel_shared_dpll_init - Initialize shared DPLLs
  * @dev: drm device
@@ -3258,7 +3270,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	const struct dpll_info *dpll_info;
 	int i;
 
-	if (INTEL_GEN(dev_priv) >= 11)
+	if (IS_ELKHARTLAKE(dev_priv))
+		dpll_mgr = &ehl_pll_mgr;
+	else if (INTEL_GEN(dev_priv) >= 11)
 		dpll_mgr = &icl_pll_mgr;
 	else if (IS_CANNONLAKE(dev_priv))
 		dpll_mgr = &cnl_pll_mgr;

From 759c9ab55b5bf315be79e57f6ea32640d183e5a3 Mon Sep 17 00:00:00 2001
From: Bob Paauwe <bob.j.paauwe@intel.com>
Date: Fri, 22 Mar 2019 10:58:45 -0700
Subject: [PATCH 52/95] drm/i915/ehl: EHL outputs are different from ICL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Configure the correct set of outputs for EHL. EHL has three DDIs
plus DSI.

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Bob Paauwe <bob.j.paauwe@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-4-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 17fb9f2c15c28..1a8617f5e2e40 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14713,7 +14713,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 	if (!HAS_DISPLAY(dev_priv))
 		return;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (IS_ELKHARTLAKE(dev_priv)) {
+		intel_ddi_init(dev_priv, PORT_A);
+		intel_ddi_init(dev_priv, PORT_B);
+		intel_ddi_init(dev_priv, PORT_C);
+		icl_dsi_init(dev_priv);
+	} else if (INTEL_GEN(dev_priv) >= 11) {
 		intel_ddi_init(dev_priv, PORT_A);
 		intel_ddi_init(dev_priv, PORT_B);
 		intel_ddi_init(dev_priv, PORT_C);

From 9b7598a99ab4bbc09a4691b06d79fc21210fb22c Mon Sep 17 00:00:00 2001
From: Bob Paauwe <bob.j.paauwe@intel.com>
Date: Fri, 22 Mar 2019 10:58:46 -0700
Subject: [PATCH 53/95] drm/i915/ehl: Set proper eu slice/subslice parameters
 for EHL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

EHL has a different number of subslices.

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Bob Paauwe <bob.j.paauwe@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-5-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/intel_device_info.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index db00110cbb2e1..e0ac908bb4e98 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -156,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
 	u8 eu_en;
 	int s;
 
-	sseu->max_slices = 1;
-	sseu->max_subslices = 8;
-	sseu->max_eus_per_subslice = 8;
+	if (IS_ELKHARTLAKE(dev_priv)) {
+		sseu->max_slices = 1;
+		sseu->max_subslices = 4;
+		sseu->max_eus_per_subslice = 8;
+	} else {
+		sseu->max_slices = 1;
+		sseu->max_subslices = 8;
+		sseu->max_eus_per_subslice = 8;
+	}
 
 	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
 	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);

From 4b225248daddfe6a48529963a45184a15be83781 Mon Sep 17 00:00:00 2001
From: Anusha Srivatsa <anusha.srivatsa@intel.com>
Date: Fri, 22 Mar 2019 10:58:47 -0700
Subject: [PATCH 54/95] drm/i915/ehl: Add Support for DMC on EHL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

EHL uses the same firmware as ICL.

Cc: Bob Paauwe <bob.j.paauwe@intel.com>
Signed-off-by: Anusha Srivatsa <anusha.srivatsa@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Bob Paauwe <bob.j.paauwe@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322175847.25707-6-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/intel_csr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index e8ac04c33e290..862a8f686ef57 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -486,7 +486,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 12) {
 		/* Allow to load fw via parameter using the last known size */
 		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
-	} else if (IS_ICELAKE(dev_priv)) {
+	} else if (IS_GEN(dev_priv, 11)) {
 		csr->fw_path = ICL_CSR_PATH;
 		csr->required_version = ICL_CSR_VERSION_REQUIRED;
 		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;

From ddad5babb05e92e655cae137c1f8a3ed2c43a685 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Date: Mon, 11 Mar 2019 10:37:47 +0800
Subject: [PATCH 55/95] drm/i915: always pin hw_id for GVT context

We initially hit an issue with the closed-context debug check when
pinning the hw_id for the GVT context. It looks like we should always
pin the hw_id for it, as the GVT context is fixed for the whole vGPU
life cycle, and we'd also like a pinned hw_id anyway, e.g. for perf
reasons.
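
In sketch form (mirroring the hunk below), the hw_id is pinned right
after the GVT context is created, and a pin failure closes the context
before the error is propagated:

    ret = i915_gem_context_pin_hw_id(ctx);
    if (ret) {
        context_close(ctx);
        ctx = ERR_PTR(ret);
        goto out;
    }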

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190311023747.1426-1-zhenyuw@linux.intel.com
---
 drivers/gpu/drm/i915/i915_gem_context.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ca3cb6af666..e6f5946682451 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -482,6 +482,13 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (IS_ERR(ctx))
 		goto out;
 
+	ret = i915_gem_context_pin_hw_id(ctx);
+	if (ret) {
+		context_close(ctx);
+		ctx = ERR_PTR(ret);
+		goto out;
+	}
+
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_set_closed(ctx); /* not user accessible */
 	i915_gem_context_clear_bannable(ctx);

From 47c3b5e9b38b9da8db62d8c354be722852ea9d83 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 21 Mar 2019 12:00:04 +0000
Subject: [PATCH 56/95] drm/i915/guc: Support for extended GuC notification
 messages

The GuC may send notification messages with a payload larger than a
single u32. Prepare the driver to accept longer messages.
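
A quick usage sketch of the widened handler, taken from the callers
changed below: it now receives a payload pointer plus a length, and
returns an errno so that short messages can be rejected.

    /* MMIO path: still a single dword */
    intel_guc_to_host_process_recv_msg(guc, &msg, 1);

    /* CT path: forward the full payload and let the handler validate len */
    ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
    if (unlikely(ret))
        goto fail_unexpected;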

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tomasz Lis <tomasz.lis@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321120004.53012-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c    | 14 +++++++++++---
 drivers/gpu/drm/i915/intel_guc.h    |  3 ++-
 drivers/gpu/drm/i915/intel_guc_ct.c |  5 +++--
 3 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index a59448a56f55e..f2b4eaee8d526 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -484,17 +484,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
 	spin_unlock(&guc->irq_lock);
 	enable_rpm_wakeref_asserts(dev_priv);
 
-	intel_guc_to_host_process_recv_msg(guc, msg);
+	intel_guc_to_host_process_recv_msg(guc, &msg, 1);
 }
 
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+				       const u32 *payload, u32 len)
 {
+	u32 msg;
+
+	if (unlikely(!len))
+		return -EPROTO;
+
 	/* Make sure to handle only enabled messages */
-	msg &= guc->msg_enabled_mask;
+	msg = payload[0] & guc->msg_enabled_mask;
 
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
 		intel_guc_log_handle_flush_event(&guc->log);
+
+	return 0;
 }
 
 int intel_guc_sample_forcewake(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 77ec1bd4df5ab..2c59ff8d9f39d 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -165,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+				       const u32 *payload, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 79ddb80883116..dde1dc0d6e69d 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -701,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct,
 			       u32 action, u32 len, const u32 *payload)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
+	int ret;
 
 	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
 
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
-		if (unlikely(len < 1))
+		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
+		if (unlikely(ret))
 			goto fail_unexpected;
-		intel_guc_to_host_process_recv_msg(guc, *payload);
 		break;
 
 	default:

From dd19f6bf9245dd17fa9e7f89ceda56c930a130d4 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 23 Mar 2019 21:40:09 +0000
Subject: [PATCH 57/95] drm/i915: Remove defunct intel_suspend_gt_powersave()

Since commit b7137e0cf1e5 ("drm/i915: Defer enabling rc6 til after we
submit the first batch/context"), intel_suspend_gt_powersave() has been
a no-op. As we still do not need to do anything explicitly on suspend
(we do everything required on idling), remove the defunct function.

References: b7137e0cf1e5 ("drm/i915: Defer enabling rc6 til after we submit the first batch/context")
Suggested-by: "Hiatt, Don" <don.hiatt@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190323214009.23294-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c  |  1 -
 drivers/gpu/drm/i915/intel_drv.h |  1 -
 drivers/gpu/drm/i915/intel_pm.c  | 16 ----------------
 3 files changed, 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4685ed04b3545..f6cdd5fb9deb9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4397,7 +4397,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	GEM_TRACE("\n");
 
 	wakeref = intel_runtime_pm_get(i915);
-	intel_suspend_gt_powersave(i915);
 
 	flush_workqueue(i915->wq);
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 14765a4b31a94..97fcbbd65e644 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2374,7 +2374,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eaf0793ebf607..a9fff971129e5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8494,22 +8494,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 		pm_runtime_put(&dev_priv->drm.pdev->dev);
 }
 
-/**
- * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev_priv: i915 device
- *
- * We don't want to disable RC6 or other features here, we just want
- * to make sure any work we've queued has finished and won't bother
- * us while we're suspended.
- */
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 6)
-		return;
-
-	/* gen6_rps_idle() will be called later to disable interrupts */
-}
-
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 {
 	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */

From 57b1c4460dc46a00f6ec439f3f11d670736b0209 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 22 Mar 2019 22:49:44 +0200
Subject: [PATCH 58/95] drm/i915: Mark AML 0x87CA as ULX
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If I'm reading the spec right, AML 0x87CA is a Y SKU, so it
should be marked as ULX in our old-style terminology.

Cc: stable@vger.kernel.org
Cc: José Roberto de Souza <jose.souza@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fixes: c0c46ca461f1 ("drm/i915/aml: Add new Amber Lake PCI ID")
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322204944.23613-1-ville.syrjala@linux.intel.com
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eff0e9aa353db..9c846e7209ffd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2362,7 +2362,8 @@ static inline unsigned int i915_sg_segment_size(void)
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0)
+				 INTEL_DEVID(dev_priv) == 0x87C0 || \
+				 INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \

From d938da6b132a2d6addeba4c57a67ec3c07824843 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 22 Mar 2019 20:08:03 +0200
Subject: [PATCH 59/95] drm/i915: Disable C3 when enabling vblank interrupts on
 i945gm
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The AGPBUSY thing doesn't work on i945gm anymore. This means
the gmch is incapable of waking the CPU from C3 when an interrupt
is generated. The interrupts just get postponed indefinitely until
something wakes up the CPU. This is rather annoying for vblank
interrupts as we are unable to maintain a steady framerate
unless the machine is sufficiently loaded to stay out of C3.

To combat this, let's use pm_qos to prevent C3 whenever vblank
interrupts are enabled. To maintain a reasonable amount of powersaving
we will attempt to limit this to C3 only, while leaving C1 and C2
enabled.

v2: Use READ_ONCE() (Chris)
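
In sketch form (pm_qos calls as used in the patch; the local variable
names here are illustrative only), the trick is to request a CPU DMA
latency just below C3's exit latency while vblank interrupts are
enabled, which still permits the shallower C1/C2 states:

    /* init: look up C3's exit latency once, register an inactive request */
    c3_disable_latency = cstate_disable_latency("C3"); /* exit_latency - 1 */
    pm_qos_add_request(&pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

    /* from a worker, whenever the vblank enable count crosses 0 <-> 1 */
    pm_qos_update_request(&pm_qos,
                          vblank_enabled ? c3_disable_latency
                                         : PM_QOS_DEFAULT_VALUE);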

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30364
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322180804.3300-1-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h |  8 +++
 drivers/gpu/drm/i915/i915_irq.c | 88 +++++++++++++++++++++++++++++++++
 2 files changed, 96 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9c846e7209ffd..11803d485275c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2042,6 +2042,14 @@ struct drm_i915_private {
 		struct i915_vma *scratch;
 	} gt;
 
+	/* For i945gm vblank irq vs. C3 workaround */
+	struct {
+		struct work_struct work;
+		struct pm_qos_request pm_qos;
+		u8 c3_disable_latency;
+		u8 enabled;
+	} i945gm_vblank;
+
 	/* perform PHY state sanity checks? */
 	bool chv_phy_assert[2];
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f788291cfe0b..fcbe173dddddd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/cpuidle.h>
 #include <linux/circ_buf.h>
 #include <drm/drm_irq.h>
 #include <drm/drm_drv.h>
@@ -3131,6 +3132,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return 0;
 }
 
+static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (dev_priv->i945gm_vblank.enabled++ == 0)
+		schedule_work(&dev_priv->i945gm_vblank.work);
+
+	return i8xx_enable_vblank(dev, pipe);
+}
+
 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3195,6 +3206,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	i8xx_disable_vblank(dev, pipe);
+
+	if (--dev_priv->i945gm_vblank.enabled == 0)
+		schedule_work(&dev_priv->i945gm_vblank.work);
+}
+
 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3228,6 +3249,60 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void i945gm_vblank_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, i945gm_vblank.work);
+
+	/*
+	 * Vblank interrupts fail to wake up the device from C3,
+	 * hence we want to prevent C3 usage while vblank interrupts
+	 * are enabled.
+	 */
+	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
+			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
+			      dev_priv->i945gm_vblank.c3_disable_latency :
+			      PM_QOS_DEFAULT_VALUE);
+}
+
+static int cstate_disable_latency(const char *name)
+{
+	const struct cpuidle_driver *drv;
+	int i;
+
+	drv = cpuidle_get_driver();
+	if (!drv)
+		return 0;
+
+	for (i = 0; i < drv->state_count; i++) {
+		const struct cpuidle_state *state = &drv->states[i];
+
+		if (!strcmp(state->name, name))
+			return state->exit_latency ?
+				state->exit_latency - 1 : 0;
+	}
+
+	return 0;
+}
+
+static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+{
+	INIT_WORK(&dev_priv->i945gm_vblank.work,
+		  i945gm_vblank_work_func);
+
+	dev_priv->i945gm_vblank.c3_disable_latency =
+		cstate_disable_latency("C3");
+	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+			   PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+}
+
+static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+{
+	cancel_work_sync(&dev_priv->i945gm_vblank.work);
+	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+}
+
 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_NOP(dev_priv))
@@ -4525,6 +4600,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int i;
 
+	if (IS_I945GM(dev_priv))
+		i945gm_vblank_work_init(dev_priv);
+
 	intel_hpd_init_work(dev_priv);
 
 	INIT_WORK(&rps->work, gen6_pm_rps_work);
@@ -4647,6 +4725,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 			dev->driver->irq_uninstall = i8xx_irq_reset;
 			dev->driver->enable_vblank = i8xx_enable_vblank;
 			dev->driver->disable_vblank = i8xx_disable_vblank;
+		} else if (IS_I945GM(dev_priv)) {
+			dev->driver->irq_preinstall = i915_irq_reset;
+			dev->driver->irq_postinstall = i915_irq_postinstall;
+			dev->driver->irq_uninstall = i915_irq_reset;
+			dev->driver->irq_handler = i915_irq_handler;
+			dev->driver->enable_vblank = i945gm_enable_vblank;
+			dev->driver->disable_vblank = i945gm_disable_vblank;
 		} else if (IS_GEN(dev_priv, 3)) {
 			dev->driver->irq_preinstall = i915_irq_reset;
 			dev->driver->irq_postinstall = i915_irq_postinstall;
@@ -4677,6 +4762,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
 {
 	int i;
 
+	if (IS_I945GM(i915))
+		i945gm_vblank_work_fini(i915);
+
 	for (i = 0; i < MAX_L3_SLICES; ++i)
 		kfree(i915->l3_parity.remap_info[i]);
 }
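
For context, a quick standalone sketch of why cstate_disable_latency()
above returns exit_latency - 1: requesting a CPU wakeup-latency cap just
below C3's exit latency keeps the cpuidle governor out of C3 while still
allowing the shallower states. The idle-state table here is made up for
illustration.

  #include <stdio.h>
  #include <string.h>

  struct idle_state { const char *name; int exit_latency_us; };

  /* hypothetical idle table; the real values come from the cpuidle driver */
  static const struct idle_state states[] = {
      { "C1", 2 }, { "C2", 10 }, { "C3", 57 },
  };

  /* cap just below the named state's exit latency so the governor can
   * still pick every shallower state but never this one */
  static int disable_latency(const char *name)
  {
      size_t i;

      for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
          if (!strcmp(states[i].name, name))
              return states[i].exit_latency_us ?
                      states[i].exit_latency_us - 1 : 0;
      return 0;
  }

  int main(void)
  {
      int cap = disable_latency("C3");
      size_t i;

      printf("latency cap: %d us\n", cap);
      for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
          printf("%s %s\n", states[i].name,
                 states[i].exit_latency_us <= cap ? "allowed" : "blocked");
      return 0;
  }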

From 0df3f09d00ae9659a250014cdbd0a14dca83a561 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 22 Mar 2019 20:08:04 +0200
Subject: [PATCH 60/95] drm/i915: Use vblank_disable_immediate on gen2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The vblank timestamp->counter guesstimator seems to be
working sufficiently well, so there's no reason not to
disable vblank interrupts ASAP even on gen2.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322180804.3300-2-ville.syrjala@linux.intel.com
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_irq.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fcbe173dddddd..455b2bf691b5c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -4642,13 +4642,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	else if (INTEL_GEN(dev_priv) >= 3)
 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
 
-	/*
-	 * Opt out of the vblank disable timer on everything except gen2.
-	 * Gen2 doesn't have a hardware frame counter and so depends on
-	 * vblank interrupts to produce sane vblank seuquence numbers.
-	 */
-	if (!IS_GEN(dev_priv, 2))
-		dev->vblank_disable_immediate = true;
+	dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on
 	 * power domain. vlv/chv can disable it at runtime and need

From 32c13bcd3544405f3c37ae6130ab14b313294222 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 25 Mar 2019 09:03:52 +0000
Subject: [PATCH 61/95] drm/i915: Report the correct errno from
 i915_gem_context_open()

Fix up the errno now that the error path receives the error code
directly and no longer computes it from ERR_PTR(ctx).

drivers/gpu/drm/i915/i915_gem_context.c:793 i915_gem_context_open() warn: passing a valid pointer to 'PTR_ERR'
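
As a minimal, self-contained illustration of the pattern being fixed
(the helpers and names below are simplified stand-ins, not the driver's
code): once a constructor has succeeded, a later failure must be
reported with the saved errno, because PTR_ERR() on a valid pointer
yields garbage.

  #include <errno.h>
  #include <stdio.h>

  /* simplified stand-ins for the kernel's PTR_ERR()/IS_ERR() helpers */
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
      return (unsigned long)ptr >= (unsigned long)-4095;
  }

  struct ctx { int id; };
  static struct ctx the_ctx;

  static struct ctx *create_ctx(void) { return &the_ctx; }  /* succeeds */
  static int register_ctx(struct ctx *c) { (void)c; return -ENOSPC; }  /* fails */

  static int open_ctx(void)
  {
      struct ctx *ctx = create_ctx();
      int err;

      if (IS_ERR(ctx))
          return PTR_ERR(ctx);

      err = register_ctx(ctx);
      if (err)
          goto err_cleanup;
      return 0;

  err_cleanup:
      /* ctx is a valid pointer here, so PTR_ERR(ctx) would be bogus;
       * report the errno from the step that actually failed instead. */
      return err;
  }

  int main(void)
  {
      printf("open_ctx() = %d\n", open_ctx());  /* prints -28 (-ENOSPC) */
      return 0;
  }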

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 3aa9945a528e ("drm/i915: Separate GEM context construction and registration to userspace")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325090413.19906-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e6f5946682451..25f267a03d3d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -709,7 +709,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_destroy(&file_priv->context_idr);
 	mutex_destroy(&file_priv->vm_idr_lock);
 	mutex_destroy(&file_priv->context_idr_lock);
-	return PTR_ERR(ctx);
+	return err;
 }
 
 void i915_gem_context_close(struct drm_file *file)

From b024ab9b2d3aa1551ed0be371a01a0ba426bbb1f Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Fri, 22 Mar 2019 14:10:08 +0200
Subject: [PATCH 62/95] drm/i915/bios: iterate over child devices to initialize
 ddi_port_info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Iterate over child devices instead of ports in parse_ddi_ports() to
initialize ddi_port_info. We'll eventually need to make some decisions
based on the child device order, which may differ from the port order.

As a bonus, this allows better abstractions for e.g. dvo port mapping.

There's a subtle change in the DDC pin and AUX channel sanitization as
we change the order. Otherwise, this should not change behaviour.
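
A standalone sketch of the control-flow change, for illustration only
(the dvo-port mapping and child list are toy stand-ins for the VBT
tables): loop over the child devices once, map each one to a port, and
let the first child claim the port.

  #include <stdio.h>

  enum port { PORT_A, PORT_B, PORT_C, PORT_NONE };

  struct child_device { int dvo_port; };

  /* toy mapping; the real driver checks several DVO port values per port */
  static enum port dvo_port_to_port(int dvo_port)
  {
      switch (dvo_port) {
      case 0: return PORT_A;
      case 1: return PORT_B;
      case 2: return PORT_C;
      default: return PORT_NONE;
      }
  }

  int main(void)
  {
      struct child_device child_dev[] = { { 2 }, { 0 }, { 0 }, { 7 } };
      int present[PORT_NONE] = { 0 };
      size_t i;

      /* child-device order now decides which device claims a port first */
      for (i = 0; i < sizeof(child_dev) / sizeof(child_dev[0]); i++) {
          enum port port = dvo_port_to_port(child_dev[i].dvo_port);

          if (port == PORT_NONE)
              continue;
          if (present[port]) {
              printf("more than one child device for port %c, using the first\n",
                     'A' + port);
              continue;
          }
          present[port] = 1;
          printf("child %zu -> port %c\n", i, 'A' + port);
      }
      return 0;
  }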

Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322121008.4456-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h   |   1 +
 drivers/gpu/drm/i915/intel_bios.c | 104 +++++++++++++++++-------------
 2 files changed, 59 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 11803d485275c..4e30f7b19b51d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -957,6 +957,7 @@ struct ddi_vbt_port_info {
 #define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
 	u8 hdmi_level_shift;
 
+	u8 present:1;
 	u8 supports_dvi:1;
 	u8 supports_hdmi:1;
 	u8 supports_dp:1;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 64f20175a6dcc..1dc8d03ff1279 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1247,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 	if (!info->alternate_ddc_pin)
 		return;
 
-	for_each_port_masked(p, (1 << port) - 1) {
+	for (p = PORT_A; p < I915_MAX_PORTS; p++) {
 		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
 
-		if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+		if (p == port || !i->present ||
+		    info->alternate_ddc_pin != i->alternate_ddc_pin)
 			continue;
 
 		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
@@ -1264,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same ddc bin and
 		 * system couldn't communicate with them separately.
 		 *
-		 * Due to parsing the ports in alphabetical order,
-		 * a higher port will always clobber a lower one.
+		 * Due to parsing the ports in child device order,
+		 * a later device will always clobber an earlier one.
 		 */
 		i->supports_dvi = false;
 		i->supports_hdmi = false;
@@ -1283,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 	if (!info->alternate_aux_channel)
 		return;
 
-	for_each_port_masked(p, (1 << port) - 1) {
+	for (p = PORT_A; p < I915_MAX_PORTS; p++) {
 		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
 
-		if (info->alternate_aux_channel != i->alternate_aux_channel)
+		if (p == port || !i->present ||
+		    info->alternate_aux_channel != i->alternate_aux_channel)
 			continue;
 
 		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
@@ -1300,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same aux channel
 		 * and system couldn't communicate with them separately.
 		 *
-		 * Due to parsing the ports in alphabetical order,
-		 * a higher port will always clobber a lower one.
+		 * Due to parsing the ports in child device order,
+		 * a later device will always clobber an earlier one.
 		 */
 		i->supports_dp = false;
 		i->alternate_aux_channel = 0;
@@ -1349,49 +1351,58 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 	return 0;
 }
 
-static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
-			   u8 bdb_version)
+static enum port dvo_port_to_port(u8 dvo_port)
 {
-	struct child_device_config *it, *child = NULL;
-	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
-	int i, j;
-	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-	/* Each DDI port can have more than one value on the "DVO Port" field,
+	/*
+	 * Each DDI port can have more than one value on the "DVO Port" field,
 	 * so look for all the possible values for each port.
 	 */
-	int dvo_ports[][3] = {
-		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
-		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
-		{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
-		{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
-		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
-		{DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+	static const int dvo_ports[][3] = {
+		[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+		[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+		[PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+		[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+		[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+		[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
 	};
+	enum port port;
+	int i;
 
-	/*
-	 * Find the first child device to reference the port, report if more
-	 * than one found.
-	 */
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		it = dev_priv->vbt.child_dev + i;
-
-		for (j = 0; j < 3; j++) {
-			if (dvo_ports[port][j] == -1)
+	for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
+		for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
+			if (dvo_ports[port][i] == -1)
 				break;
 
-			if (it->dvo_port == dvo_ports[port][j]) {
-				if (child) {
-					DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
-						      port_name(port));
-				} else {
-					child = it;
-				}
-			}
+			if (dvo_port == dvo_ports[port][i])
+				return port;
 		}
 	}
-	if (!child)
+
+	return PORT_NONE;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv,
+			   const struct child_device_config *child,
+			   u8 bdb_version)
+{
+	struct ddi_vbt_port_info *info;
+	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+	enum port port;
+
+	port = dvo_port_to_port(child->dvo_port);
+	if (port == PORT_NONE)
 		return;
 
+	info = &dev_priv->vbt.ddi_port_info[port];
+
+	if (info->present) {
+		DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+			      port_name(port));
+		return;
+	}
+
+	info->present = true;
+
 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
 	is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@@ -1523,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 
 static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
 {
-	enum port port;
+	const struct child_device_config *child;
+	int i;
 
 	if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		return;
 
-	if (!dev_priv->vbt.child_dev_num)
-		return;
-
 	if (bdb_version < 155)
 		return;
 
-	for (port = PORT_A; port < I915_MAX_PORTS; port++)
-		parse_ddi_port(dev_priv, port, bdb_version);
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		child = dev_priv->vbt.child_dev + i;
+
+		parse_ddi_port(dev_priv, child, bdb_version);
+	}
 }
 
 static void

From 97ee6e925552438600f6c244595e3f07612bbf6f Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Thu, 21 Mar 2019 17:24:31 -0700
Subject: [PATCH 63/95] drm/i915: stop storing the media fuse

We're already updating the engine_mask to reflect what's in the HW, so
we can just get the info from there. A couple of macros have been added
to facilitate this.
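
A standalone sketch of the arithmetic such a macro performs (the engine
mask, bit positions and instance counts below are made up): slice
'count' bits out of the engine mask starting at bit 'first', giving a
per-class instance mask.

  #include <stdint.h>
  #include <stdio.h>

  /* simplified 32-bit version of the kernel's GENMASK(hi, lo) */
  #define GENMASK(hi, lo) ((~0u >> (31 - (hi))) & ~((1u << (lo)) - 1u))

  /* slice 'count' engine bits starting at bit 'first' out of 'mask' */
  static unsigned int instances_mask(uint32_t mask, unsigned int first,
                                     unsigned int count)
  {
      return (mask & GENMASK(first + count - 1, first)) >> first;
  }

  int main(void)
  {
      uint32_t engine_mask = 0x53;  /* hypothetical: bits 0, 1, 4 and 6 set */

      /* e.g. four video-decode instances starting at bit 4 */
      printf("vdbox mask: 0x%x\n", instances_mask(engine_mask, 4, 4));
      /* e.g. two video-enhance instances starting at bit 8 */
      printf("vebox mask: 0x%x\n", instances_mask(engine_mask, 8, 2));
      return 0;
  }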

v2: Appease checkpatch

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322002431.9585-1-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h          | 11 +++++++++++
 drivers/gpu/drm/i915/intel_device_info.c | 16 +++++++++-------
 drivers/gpu/drm/i915/intel_device_info.h |  4 ----
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4e30f7b19b51d..380198628d837 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2454,6 +2454,17 @@ static inline unsigned int i915_sg_segment_size(void)
 #define ALL_ENGINES	(~0u)
 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
 
+#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({		\
+	unsigned int first__ = (first);					\
+	unsigned int count__ = (count);					\
+	(INTEL_INFO(dev_priv)->engine_mask &				\
+	 GENMASK(first__ + count__ - 1, first__)) >> first__		\
+})
+#define VDBOX_MASK(dev_priv) \
+	ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(dev_priv) \
+	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+
 #define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e0ac908bb4e98..5776a0def7ac9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -878,22 +878,24 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 	unsigned int logical_vdbox = 0;
 	unsigned int i;
 	u32 media_fuse;
+	u16 vdbox_mask;
+	u16 vebox_mask;
 
 	if (INTEL_GEN(dev_priv) < 11)
 		return;
 
 	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
 
-	RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-	RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-		GEN11_GT_VEBOX_DISABLE_SHIFT;
+	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+		      GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
+	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", vdbox_mask);
 	for (i = 0; i < I915_MAX_VCS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VCS(i)))
 			continue;
 
-		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
+		if (!(BIT(i) & vdbox_mask)) {
 			info->engine_mask &= ~BIT(_VCS(i));
 			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
 			continue;
@@ -907,12 +909,12 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
 	}
 
-	DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
+	DRM_DEBUG_DRIVER("vebox enable: %04x\n", vebox_mask);
 	for (i = 0; i < I915_MAX_VECS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VECS(i)))
 			continue;
 
-		if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
+		if (!(BIT(i) & vebox_mask)) {
 			info->engine_mask &= ~BIT(_VECS(i));
 			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
 		}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 98acefaacec91..7e04b4829abac 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -209,10 +209,6 @@ struct intel_runtime_info {
 
 	u32 cs_timestamp_frequency_khz;
 
-	/* Enabled (not fused off) media engine bitmasks. */
-	u8 vdbox_enable;
-	u8 vebox_enable;
-
 	/* Media engine access to SFC per instance */
 	u8 vdbox_sfc_access;
 };

From 602cbe8efc523ba56e1f41e8f74c7aa835672593 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Tue, 26 Mar 2019 08:08:43 +0300
Subject: [PATCH 64/95] drm/i915/selftests: Fix an IS_ERR() vs NULL check

The live_context() function returns error pointers.  It never returns
NULL.

Fixes: 9c1477e83e62 ("drm/i915/selftests: Exercise adding requests to a full GGTT")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326050843.GA20038@kadam
---
 drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 9a9451846b33f..89766688e420c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
 			struct i915_gem_context *ctx;
 
 			ctx = live_context(i915, file);
-			if (!ctx)
+			if (IS_ERR(ctx))
 				break;
 
 			/* We will need some GGTT space for the rq's context */

From 947f44174684eb0499f2255c256a8b61ae985057 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 15:37:47 -0700
Subject: [PATCH 65/95] drm/i915/skl: use previous pll hw readout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

By the time skl_ddi_clock_get() is called - and thus
skl_calc_wrpll_link() - we've just got the hw state from the pll
registers. We don't need to read them again: we can instead reuse what
was cached in the dpll_hw_state.
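
The shape of the change, as a toy standalone sketch (the register names
and the decode are made up, not SKL's real bit layout): the clock
calculation takes a const pointer to the cached hw state instead of
issuing fresh MMIO reads.

  #include <stdint.h>
  #include <stdio.h>

  /* snapshot of PLL config registers captured once at hw readout time */
  struct pll_hw_state {
      uint32_t cfgcr1;
      uint32_t cfgcr2;
  };

  /* toy decode: pretend the low byte of cfgcr1 is an integer feedback
   * divider and the low nibble of cfgcr2 is a post divider */
  static int calc_link_clock(const struct pll_hw_state *state, int ref_khz)
  {
      int fbdiv = state->cfgcr1 & 0xff;
      int pdiv = state->cfgcr2 & 0xf;

      return pdiv ? ref_khz * fbdiv / pdiv : 0;
  }

  int main(void)
  {
      /* values captured from the hardware earlier; no register reads here */
      struct pll_hw_state cached = { .cfgcr1 = 0x40, .cfgcr2 = 0x2 };

      printf("link clock: %d kHz\n", calc_link_clock(&cached, 24000));
      return 0;
  }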

v2: rename state variable to pll_state, make argument const in
    skl_calc_wrpll_link(), and remove a warning that was not useful (from Ville)

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322223751.22089-2-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 50 +++++++++++++-------------------
 1 file changed, 20 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933df3a57a8ab..a995b463aa338 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1240,24 +1240,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return (refclk * n * 100) / (p * r);
 }
 
-static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			       enum intel_dpll_id pll_id)
+static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
 {
-	i915_reg_t cfgcr1_reg, cfgcr2_reg;
-	u32 cfgcr1_val, cfgcr2_val;
 	u32 p0, p1, p2, dco_freq;
 
-	cfgcr1_reg = DPLL_CFGCR1(pll_id);
-	cfgcr2_reg = DPLL_CFGCR2(pll_id);
+	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
 
-	cfgcr1_val = I915_READ(cfgcr1_reg);
-	cfgcr2_val = I915_READ(cfgcr2_reg);
-
-	p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
-	p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
-
-	if (cfgcr2_val &  DPLL_CFGCR2_QDIV_MODE(1))
-		p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
+		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
 	else
 		p1 = 1;
 
@@ -1292,10 +1283,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
+		* 24 * 1000;
 
-	dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
-		1000) / 0x8000;
+	dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
+		     * 24 * 1000) / 0x8000;
 
 	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
 		return 0;
@@ -1544,22 +1536,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
 }
 
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
-				struct intel_crtc_state *pipe_config)
+			      struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	int link_clock = 0;
-	u32 dpll_ctl1;
-	enum intel_dpll_id pll_id;
-
-	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+	int link_clock;
 
-	dpll_ctl1 = I915_READ(DPLL_CTRL1);
-
-	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
-		link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
+	/*
+	 * ctrl1 register is already shifted for each pll, just use 0 to get
+	 * the internal shift for each field
+	 */
+	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
+		link_clock = skl_calc_wrpll_link(pll_state);
 	} else {
-		link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
-		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
+		link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
+		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
 
 		switch (link_clock) {
 		case DPLL_CTRL1_LINK_RATE_810:

From 47c9877e9be326f2dfaca5c771919ea3bb7e0ef8 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 15:37:48 -0700
Subject: [PATCH 66/95] drm/i915/bxt: make bxt_calc_pll_link() similar to skl
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename state to pll_state and use it as the argument to
bxt_calc_pll_link(), similar to how it's done in the skl variant.

The WARN_ON(!crtc_state->shared_dpll) is not very useful, so remove it
as well.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322223751.22089-3-lucas.demarchi@intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ddi.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a995b463aa338..975dbf6c7fba2 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1629,24 +1629,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 	ddi_dotclock_get(pipe_config);
 }
 
-static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
+static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
 {
-	struct intel_dpll_hw_state *state;
 	struct dpll clock;
 
-	/* For DDI ports we always use a shared PLL. */
-	if (WARN_ON(!crtc_state->shared_dpll))
-		return 0;
-
-	state = &crtc_state->dpll_hw_state;
-
 	clock.m1 = 2;
-	clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
-	if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
-		clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
-	clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
-	clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
-	clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
 
 	return chv_calc_dpll_params(100000, &clock);
 }
@@ -1654,7 +1647,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config)
 {
-	pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
+	pipe_config->port_clock =
+		bxt_calc_pll_link(&pipe_config->dpll_hw_state);
 
 	ddi_dotclock_get(pipe_config);
 }

From 5e65216d8dd1765d02c6d519a61e95b80e518344 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 15:37:49 -0700
Subject: [PATCH 67/95] drm/i915/cnl: use previous pll hw readout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

By the time cnl_ddi_clock_get() is called we've just got the hw state
from the pll registers. We don't need to read them again: we can instead
reuse what was cached in the dpll_hw_state.

This also affects the code for ICL since it partially reuses the CNL
code. However, the more intricate part on ICL is left for another patch.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322223751.22089-4-lucas.demarchi@intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/icl_dsi.c   |  5 ++--
 drivers/gpu/drm/i915/intel_ddi.c | 44 ++++++++++++--------------------
 drivers/gpu/drm/i915/intel_drv.h |  2 +-
 3 files changed, 19 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 92440ff48f932..1395338c6772c 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -1179,11 +1179,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	u32 pll_id;
 
 	/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
-	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
-	pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+	pipe_config->port_clock =
+		cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
 	pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 975dbf6c7fba2..0e8f20d9ab2a2 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1296,24 +1296,15 @@ static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
 }
 
 int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			enum intel_dpll_id pll_id)
+			struct intel_dpll_hw_state *pll_state)
 {
-	u32 cfgcr0, cfgcr1;
 	u32 p0, p1, p2, dco_freq, ref_clock;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
-		cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
-		cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
-	} else {
-		cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-		cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
-	}
-
-	p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
-	p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
 
-	if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
-		p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
 	else
 		p1 = 1;
@@ -1348,9 +1339,10 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 
 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
 
-	dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
+		* ref_clock;
 
-	dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
 		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
 
 	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
@@ -1463,13 +1455,14 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
 	enum port port = encoder->port;
-	int link_clock = 0;
+	int link_clock;
 	u32 pll_id;
 
 	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 	if (intel_port_is_combophy(dev_priv, port)) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
 	} else {
 		if (pll_id == DPLL_ID_ICL_TBTPLL)
 			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
@@ -1485,18 +1478,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	int link_clock = 0;
-	u32 cfgcr0;
-	enum intel_dpll_id pll_id;
-
-	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
-
-	cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+	int link_clock;
 
-	if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
 	} else {
-		link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+		link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
 
 		switch (link_clock) {
 		case DPLL_CFGCR0_LINK_RATE_810:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 97fcbbd65e644..6102d21a8ba81 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1659,7 +1659,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 				     bool enable);
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
 int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			enum intel_dpll_id pll_id);
+			struct intel_dpll_hw_state *state);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
 				   int color_plane, unsigned int height);

From 02c99d26f5200210c1080f4bf13d344329d85cd8 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 15:37:50 -0700
Subject: [PATCH 68/95] drm/i915/icl: use previous pll hw readout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

By the time icl_ddi_clock_get() is called we've just got the hw state
from the pll registers. We don't need to read them again: we can instead
reuse what was cached in the dpll_hw_state.

While at it, s/refclk/ref_clock/ just to be consistent with the name
used in code nearby.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322223751.22089-5-lucas.demarchi@intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ddi.c | 37 ++++++++++++++++----------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0e8f20d9ab2a2..63005239d0925 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1374,25 +1374,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
 }
 
 static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
-				enum port port)
+				const struct intel_dpll_hw_state *pll_state)
 {
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	u32 mg_pll_div0, mg_clktop_hsclkctl;
-	u32 m1, m2_int, m2_frac, div1, div2, refclk;
+	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
 	u64 tmp;
 
-	refclk = dev_priv->cdclk.hw.ref;
-
-	mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
-	mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+	ref_clock = dev_priv->cdclk.hw.ref;
 
-	m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
-	m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-	m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
-		  (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
-		  MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+	m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+	m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+	m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+		(pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+		MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
 
-	switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+	switch (pll_state->mg_clktop2_hsclkctl &
+		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
 		div1 = 2;
 		break;
@@ -1406,12 +1402,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
 		div1 = 7;
 		break;
 	default:
-		MISSING_CASE(mg_clktop_hsclkctl);
+		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
 		return 0;
 	}
 
-	div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+	div2 = (pll_state->mg_clktop2_hsclkctl &
+		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
 	/* div2 value of 0 is same as 1 means no div */
 	if (div2 == 0)
 		div2 = 1;
@@ -1420,8 +1418,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
 	 * Adjust the original formula to delay the division by 2^22 in order to
 	 * minimize possible rounding errors.
 	 */
-	tmp = (u64)m1 * m2_int * refclk +
-	      (((u64)m1 * m2_frac * refclk) >> 22);
+	tmp = (u64)m1 * m2_int * ref_clock +
+	      (((u64)m1 * m2_frac * ref_clock) >> 22);
 	tmp = div_u64(tmp, 5 * div1 * div2);
 
 	return tmp;
@@ -1467,10 +1465,11 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
 		if (pll_id == DPLL_ID_ICL_TBTPLL)
 			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
 		else
-			link_clock = icl_calc_mg_pll_link(dev_priv, port);
+			link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
 	}
 
 	pipe_config->port_clock = link_clock;
+
 	ddi_dotclock_get(pipe_config);
 }
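
As a standalone check of the fixed-point arithmetic icl_calc_mg_pll_link()
performs on the cached dividers (the divider values in main() are made
up): the fractional part of m2 carries 22 bits, and its >> 22 is applied
to that term alone, before the final division, to limit rounding error.

  #include <stdint.h>
  #include <stdio.h>

  /* MG-PLL style calculation: m2 has a 22-bit fractional part */
  static uint64_t pll_link_clock(uint32_t m1, uint32_t m2_int, uint32_t m2_frac,
                                 uint32_t div1, uint32_t div2, uint32_t ref_khz)
  {
      uint64_t tmp;

      /* shift only the fractional term down by 22 bits, then divide */
      tmp = (uint64_t)m1 * m2_int * ref_khz +
            (((uint64_t)m1 * m2_frac * ref_khz) >> 22);
      return tmp / (5ull * div1 * div2);
  }

  int main(void)
  {
      /* m2 = 70 + 0.5, 38.4 MHz reference: expect 2 * 70.5 * 38400 / 10 */
      printf("%llu kHz\n", (unsigned long long)
             pll_link_clock(2, 70, 1u << 21, 2, 1, 38400));
      return 0;
  }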
 

From 077973c8c37fff683b607f7d0966db1eee47fb4f Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Fri, 22 Mar 2019 15:37:51 -0700
Subject: [PATCH 69/95] drm/i915/icl: reduce pll_id scope and use enum type
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that pll_id is not used anymore for combophy, reduce its scope.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322223751.22089-6-lucas.demarchi@intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ddi.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63005239d0925..ba3de1079e63a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1456,12 +1456,13 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
 	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
 	enum port port = encoder->port;
 	int link_clock;
-	u32 pll_id;
 
-	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 	if (intel_port_is_combophy(dev_priv, port)) {
 		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
 	} else {
+		enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
+						pipe_config->shared_dpll);
+
 		if (pll_id == DPLL_ID_ICL_TBTPLL)
 			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
 		else

From 6cc5ca76882521998c5138cd1ba67659e8f7f58c Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:32 -0700
Subject: [PATCH 70/95] drm/i915: rename raw reg access functions

They now work on uncore, so use the raw_uncore_ prefix. Also move them
to intel_uncore.h.
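
A userspace-flavoured sketch of the macro pattern being moved (names and
the fake backing store are illustrative): a token-pasting macro stamps
out one typed raw accessor per register width over a single mmio base
pointer.

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  struct uncore { uint8_t *regs; };

  /* stamp out one raw accessor per register width over the mmio base */
  #define RAW_READ(bits) \
  static inline uint##bits##_t raw_read##bits(const struct uncore *uncore, \
                                              uint32_t offset) \
  { \
      return *(volatile const uint##bits##_t *)(uncore->regs + offset); \
  }

  RAW_READ(8)
  RAW_READ(32)

  int main(void)
  {
      /* fake, aligned backing store standing in for mapped registers */
      static uint32_t backing[16];
      struct uncore uncore = { .regs = (uint8_t *)backing };

      backing[4] = 0xdeadbeef;  /* "register" at byte offset 0x10 */
      printf("32-bit: 0x%" PRIx32 ", low byte (little-endian): 0x%" PRIx8 "\n",
             raw_read32(&uncore, 0x10), raw_read8(&uncore, 0x10));
      return 0;
  }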

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-2-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h     | 32 ++----------------
 drivers/gpu/drm/i915/i915_vgpu.c    |  6 ++--
 drivers/gpu/drm/i915/intel_uncore.c | 50 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_uncore.h | 27 ++++++++++++++++
 4 files changed, 58 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 380198628d837..681bcbd5c06b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3518,32 +3518,6 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
-#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct intel_uncore *uncore, \
-					     i915_reg_t reg) \
-{ \
-	return read##s(uncore->regs + i915_mmio_reg_offset(reg)); \
-}
-
-#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct intel_uncore *uncore, \
-				       i915_reg_t reg, uint##x##_t val) \
-{ \
-	write##s(val, uncore->regs + i915_mmio_reg_offset(reg)); \
-}
-__raw_read(8, b)
-__raw_read(16, w)
-__raw_read(32, l)
-__raw_read(64, q)
-
-__raw_write(8, b)
-__raw_write(16, w)
-__raw_write(32, l)
-__raw_write(64, q)
-
-#undef __raw_read
-#undef __raw_write
-
 /* These are untraced mmio-accessors that are only valid to be used inside
  * critical sections, such as inside IRQ handlers, where forcewake is explicitly
  * controlled.
@@ -3570,9 +3544,9 @@ __raw_write(64, q)
  * therefore generally be serialised, by either the dev_priv->uncore.lock or
  * a more localised lock guarding all access to that bank of registers.
  */
-#define I915_READ_FW(reg__) __raw_i915_read32(&dev_priv->uncore, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(&dev_priv->uncore, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(&dev_priv->uncore, (reg__), (val__))
+#define I915_READ_FW(reg__) __raw_uncore_read32(&dev_priv->uncore, (reg__))
+#define I915_WRITE_FW(reg__, val__) __raw_uncore_write32(&dev_priv->uncore, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_uncore_write64(&dev_priv->uncore, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 3d0b493e42003..94d3992b599d2 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -66,17 +66,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	magic = __raw_i915_read64(uncore, vgtif_reg(magic));
+	magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
 	if (magic != VGT_MAGIC)
 		return;
 
-	version_major = __raw_i915_read16(uncore, vgtif_reg(version_major));
+	version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
 	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
 
-	dev_priv->vgpu.caps = __raw_i915_read32(uncore, vgtif_reg(vgt_caps));
+	dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
 
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1816eeae3ba92..02341c189803e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -31,7 +31,7 @@
 #define FORCEWAKE_ACK_TIMEOUT_MS 50
 #define GT_FIFO_TIMEOUT_MS	 10
 
-#define __raw_posting_read(uncore__, reg__) (void)__raw_i915_read32((uncore__), (reg__))
+#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
 
 static const char * const forcewake_domain_names[] = {
 	"render",
@@ -279,7 +279,7 @@ static inline u32 gt_thread_status(struct intel_uncore *uncore)
 {
 	u32 val;
 
-	val = __raw_i915_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
+	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
 
 	return val;
@@ -306,7 +306,7 @@ static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
 
 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
 {
-	u32 count = __raw_i915_read32(uncore, GTFIFOCTL);
+	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
 
 	return count & GT_FIFO_FREE_ENTRIES_MASK;
 }
@@ -451,8 +451,8 @@ static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
 	if (IS_HASWELL(dev_priv) ||
 	    IS_BROADWELL(dev_priv) ||
 	    INTEL_GEN(dev_priv) >= 9) {
-		dev_priv->edram_cap = __raw_i915_read32(&dev_priv->uncore,
-							HSW_EDRAM_CAP);
+		dev_priv->edram_cap = __raw_uncore_read32(&dev_priv->uncore,
+							  HSW_EDRAM_CAP);
 
 		/* NB: We can't write IDICR yet because we do not have gt funcs
 		 * set up */
@@ -470,11 +470,11 @@ fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 dbg;
 
-	dbg = __raw_i915_read32(uncore, FPGA_DBG);
+	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
 		return false;
 
-	__raw_i915_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
 	return true;
 }
@@ -484,11 +484,11 @@ vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 cer;
 
-	cer = __raw_i915_read32(uncore, CLAIM_ER);
+	cer = __raw_uncore_read32(uncore, CLAIM_ER);
 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
 		return false;
 
-	__raw_i915_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
+	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
 
 	return true;
 }
@@ -498,11 +498,11 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
 {
 	u32 fifodbg;
 
-	fifodbg = __raw_i915_read32(uncore, GTFIFODBG);
+	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
 
 	if (unlikely(fifodbg)) {
 		DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
-		__raw_i915_write32(uncore, GTFIFODBG, fifodbg);
+		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
 	}
 
 	return fifodbg;
@@ -537,10 +537,10 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 
 	/* WaDisableShadowRegForCpd:chv */
 	if (IS_CHERRYVIEW(i915)) {
-		__raw_i915_write32(uncore, GTFIFOCTL,
-				   __raw_i915_read32(uncore, GTFIFOCTL) |
-				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
-				   GT_FIFO_CTL_RC6_POLICY_STALL);
+		__raw_uncore_write32(uncore, GTFIFOCTL,
+				     __raw_uncore_read32(uncore, GTFIFOCTL) |
+				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+				     GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
 
 	iosf_mbi_punit_acquire();
@@ -1068,7 +1068,7 @@ ilk_dummy_write(struct intel_uncore *uncore)
 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
 	 * hence harmless to write 0 into. */
-	__raw_i915_write32(uncore, MI_MODE, 0);
+	__raw_uncore_write32(uncore, MI_MODE, 0);
 }
 
 static void
@@ -1110,7 +1110,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 static u##x \
 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
-	val = __raw_i915_read##x(uncore, reg); \
+	val = __raw_uncore_read##x(uncore, reg); \
 	GEN2_READ_FOOTER; \
 }
 
@@ -1119,7 +1119,7 @@ static u##x \
 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
 	ilk_dummy_write(uncore); \
-	val = __raw_i915_read##x(uncore, reg); \
+	val = __raw_uncore_read##x(uncore, reg); \
 	GEN2_READ_FOOTER; \
 }
 
@@ -1189,7 +1189,7 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
 	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
 	if (fw_engine) \
 		__force_wake_auto(uncore, fw_engine); \
-	val = __raw_i915_read##x(uncore, reg); \
+	val = __raw_uncore_read##x(uncore, reg); \
 	GEN6_READ_FOOTER; \
 }
 #define __gen6_read(x) __gen_read(gen6, x)
@@ -1226,7 +1226,7 @@ __gen6_read(64)
 static void \
 gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
-	__raw_i915_write##x(uncore, reg, val); \
+	__raw_uncore_write##x(uncore, reg, val); \
 	GEN2_WRITE_FOOTER; \
 }
 
@@ -1235,7 +1235,7 @@ static void \
 gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
 	ilk_dummy_write(uncore); \
-	__raw_i915_write##x(uncore, reg, val); \
+	__raw_uncore_write##x(uncore, reg, val); \
 	GEN2_WRITE_FOOTER; \
 }
 
@@ -1271,7 +1271,7 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 	GEN6_WRITE_HEADER; \
 	if (NEEDS_FORCE_WAKE(offset)) \
 		__gen6_gt_wait_for_fifo(uncore); \
-	__raw_i915_write##x(uncore, reg, val); \
+	__raw_uncore_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 
@@ -1283,7 +1283,7 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
 	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
 	if (fw_engine) \
 		__force_wake_auto(uncore, fw_engine); \
-	__raw_i915_write##x(uncore, reg, val); \
+	__raw_uncore_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 #define __gen8_write(x) __gen_write(gen8, x)
@@ -1470,7 +1470,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		 * before the ecobus check.
 		 */
 
-		__raw_i915_write32(uncore, FORCEWAKE, 0);
+		__raw_uncore_write32(uncore, FORCEWAKE, 0);
 		__raw_posting_read(uncore, ECOBUS);
 
 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
@@ -1478,7 +1478,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 
 		spin_lock_irq(&uncore->lock);
 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
-		ecobus = __raw_i915_read32(uncore, ECOBUS);
+		ecobus = __raw_uncore_read32(uncore, ECOBUS);
 		fw_domains_put(uncore, FORCEWAKE_RENDER);
 		spin_unlock_irq(&uncore->lock);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index d345e5ab04a54..f7670cbc41c98 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -215,6 +215,33 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 					    2, timeout_ms, NULL);
 }
 
+/* register access functions */
+#define __raw_read(x__, s__) \
+static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
+					    i915_reg_t reg) \
+{ \
+	return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x__, s__) \
+static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
+					   i915_reg_t reg, u##x__ val) \
+{ \
+	write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
 #define raw_reg_read(base, reg) \
 	readl(base + i915_mmio_reg_offset(reg))
 #define raw_reg_write(base, reg, value) \

From 5a0ba777098a9b90f88ccca2c53dd7382bfba30c Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:33 -0700
Subject: [PATCH 71/95] drm/i915: add HAS_FORCEWAKE flag to uncore

We have several cases where we don't have forcewake (older gens, GVT and
the planned display-only uncore), so, instead of checking the various
conditions every time, save the info in a flag and use that.

Note that this patch also changes the behavior for gen5 with vgpu
enabled, but this is not an issue since we don't support vgpu on gen5.

v2: split out from previous patch, fix check for missing case (Paulo)
v3: Inline helper for clarity in testing flags
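
A minimal standalone sketch of the flag-plus-inline-helper pattern (the
struct, flag name and conditions are simplified stand-ins): evaluate the
hardware conditions once at init, then have callers test the cached
capability bit.

  #include <stdio.h>

  #define HAS_FORCEWAKE  (1u << 0)

  struct uncore { unsigned int flags; };

  /* inline helper so callers test the cached capability, not the conditions */
  static inline int uncore_has_forcewake(const struct uncore *uncore)
  {
      return uncore->flags & HAS_FORCEWAKE;
  }

  /* hypothetical init: evaluate the conditions exactly once */
  static void uncore_init(struct uncore *uncore, int gen, int vgpu_active)
  {
      uncore->flags = 0;
      if (gen > 5 && !vgpu_active)
          uncore->flags |= HAS_FORCEWAKE;
  }

  int main(void)
  {
      struct uncore bare, virt;

      uncore_init(&bare, 9, 0);  /* gen9, bare metal */
      uncore_init(&virt, 9, 1);  /* gen9, virtualized */
      printf("bare: %d, virt: %d\n",
             uncore_has_forcewake(&bare), uncore_has_forcewake(&virt));
      return 0;
  }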

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-3-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_uncore.c | 31 +++++++++++++++++++----------
 drivers/gpu/drm/i915/intel_uncore.h |  9 +++++++++
 2 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 02341c189803e..6ea6351f51236 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1391,7 +1391,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
 	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
-	if (INTEL_GEN(i915) <= 5 || intel_vgpu_active(i915))
+	if (!intel_uncore_has_forcewake(uncore))
 		return;
 
 	if (INTEL_GEN(i915) >= 11) {
@@ -1590,6 +1590,9 @@ int intel_uncore_init(struct intel_uncore *uncore)
 
 	i915_check_vgpu(i915);
 
+	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+		uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
 	intel_uncore_edram_detect(i915);
 	intel_uncore_fw_domains_init(uncore);
 	__intel_uncore_early_sanitize(uncore, 0);
@@ -1598,12 +1601,14 @@ int intel_uncore_init(struct intel_uncore *uncore)
 	uncore->pmic_bus_access_nb.notifier_call =
 		i915_pmic_bus_access_notifier;
 
-	if (IS_GEN_RANGE(i915, 2, 4) || intel_vgpu_active(i915)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
-		ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
-	} else if (IS_GEN(i915, 5)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
-		ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+	if (!intel_uncore_has_forcewake(uncore)) {
+		if (IS_GEN(i915, 5)) {
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+		} else {
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+		}
 	} else if (IS_GEN_RANGE(i915, 6, 7)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
 
@@ -1912,7 +1917,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
 	} else {
-		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+		/* on devices with FW we expect to hit one of the above cases */
+		if (intel_uncore_has_forcewake(uncore))
+			MISSING_CASE(INTEL_GEN(dev_priv));
+
 		fw_domains = 0;
 	}
 
@@ -1938,7 +1946,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
 	} else {
-		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+		/* on devices with FW we expect to hit one of the above cases */
+		if (intel_uncore_has_forcewake(uncore))
+			MISSING_CASE(INTEL_GEN(dev_priv));
+
 		fw_domains = 0;
 	}
 
@@ -1969,7 +1980,7 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
 
 	WARN_ON(!op);
 
-	if (intel_vgpu_active(dev_priv))
+	if (!intel_uncore_has_forcewake(&dev_priv->uncore))
 		return 0;
 
 	if (op & FW_REG_READ)
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index f7670cbc41c98..fa675f0f60bdf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -97,6 +97,9 @@ struct intel_uncore {
 
 	spinlock_t lock; /** lock is also taken in irq contexts. */
 
+	unsigned int flags;
+#define UNCORE_HAS_FORCEWAKE		BIT(0)
+
 	const struct intel_forcewake_range *fw_domains_table;
 	unsigned int fw_domains_table_entries;
 
@@ -143,6 +146,12 @@ forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
 	return container_of(d, struct intel_uncore, fw_domain[d->id]);
 }
 
+static inline bool
+intel_uncore_has_forcewake(const struct intel_uncore *uncore)
+{
+	return uncore->flags & UNCORE_HAS_FORCEWAKE;
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
 int intel_uncore_init(struct intel_uncore *uncore);
 void intel_uncore_prune(struct intel_uncore *uncore);

From 2cf7bf6f2f206700d1560d68f1464b4cb7882291 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:34 -0700
Subject: [PATCH 72/95] drm/i915: add uncore flags for unclaimed mmio

Save the HW capabilities to avoid having to jump back to dev_priv
every time.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-4-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c               |  4 +-
 drivers/gpu/drm/i915/intel_display.c          |  2 +-
 drivers/gpu/drm/i915/intel_hangcheck.c        |  2 +-
 drivers/gpu/drm/i915/intel_uncore.c           | 57 ++++++++++---------
 drivers/gpu/drm/i915/intel_uncore.h           | 25 +++++++-
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 15 ++---
 6 files changed, 65 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d3df0ebaa09c9..622b175b05fdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2887,7 +2887,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	enable_rpm_wakeref_asserts(dev_priv);
 	intel_runtime_pm_cleanup(dev_priv);
 
-	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
 		DRM_ERROR("Unclaimed access detected prior to suspending\n");
 
 	dev_priv->runtime_pm.suspended = true;
@@ -2941,7 +2941,7 @@ static int intel_runtime_resume(struct device *kdev)
 
 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 	dev_priv->runtime_pm.suspended = false;
-	if (intel_uncore_unclaimed_mmio(dev_priv))
+	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
 	if (INTEL_GEN(dev_priv) >= 11) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1a8617f5e2e40..61c7bbd066e31 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13533,7 +13533,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		 * so enable debugging for the next modeset - and hope we catch
 		 * the culprit.
 		 */
-		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 57ed49dc19c4f..125662c649341 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -270,7 +270,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	 * periodically arm the mmio checker to see if we are triggering
 	 * any invalid access.
 	 */
-	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+	intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
 
 	for_each_engine(engine, dev_priv, id) {
 		struct hangcheck hc;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6ea6351f51236..d7ce80cb55d8b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -509,18 +509,17 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
 }
 
 static bool
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	bool ret = false;
 
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
+	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
 		ret |= fpga_check_for_unclaimed_mmio(uncore);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	if (intel_uncore_has_dbg_unclaimed(uncore))
 		ret |= vlv_check_for_unclaimed_mmio(uncore);
 
-	if (IS_GEN_RANGE(dev_priv, 6, 7))
+	if (intel_uncore_has_fifo(uncore))
 		ret |= gen6_check_for_fifo_debug(uncore);
 
 	return ret;
@@ -529,14 +528,12 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 					  unsigned int restore_forcewake)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(i915))
+	if (check_for_unclaimed_mmio(uncore))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(i915)) {
+	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
 		__raw_uncore_write32(uncore, GTFIFOCTL,
 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -549,7 +546,7 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 		spin_lock_irq(&uncore->lock);
 		uncore->funcs.force_wake_get(uncore, restore_forcewake);
 
-		if (IS_GEN_RANGE(i915, 6, 7))
+		if (intel_uncore_has_fifo(uncore))
 			uncore->fifo_count = fifo_free_entries(uncore);
 		spin_unlock_irq(&uncore->lock);
 	}
@@ -668,12 +665,10 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
  */
 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-
 	spin_lock_irq(&uncore->lock);
 	if (!--uncore->user_forcewake.count) {
-		if (intel_uncore_unclaimed_mmio(i915))
-			dev_info(i915->drm.dev,
+		if (intel_uncore_unclaimed_mmio(uncore))
+			dev_info(uncore_to_i915(uncore)->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
 		uncore->unclaimed_mmio_check =
@@ -1072,12 +1067,12 @@ ilk_dummy_write(struct intel_uncore *uncore)
 }
 
 static void
-__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+__unclaimed_reg_debug(struct intel_uncore *uncore,
 		      const i915_reg_t reg,
 		      const bool read,
 		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
 		 "Unclaimed %s register 0x%x\n",
 		 read ? "read from" : "write to",
 		 i915_mmio_reg_offset(reg)))
@@ -1086,7 +1081,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 }
 
 static inline void
-unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+unclaimed_reg_debug(struct intel_uncore *uncore,
 		    const i915_reg_t reg,
 		    const bool read,
 		    const bool before)
@@ -1094,7 +1089,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 	if (likely(!i915_modparams.mmio_debug))
 		return;
 
-	__unclaimed_reg_debug(dev_priv, reg, read, before);
+	__unclaimed_reg_debug(uncore, reg, read, before);
 }
 
 #define GEN2_READ_HEADER(x) \
@@ -1145,10 +1140,10 @@ __gen2_read(64)
 	u##x val = 0; \
 	assert_rpm_wakelock_held(dev_priv); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
-	unclaimed_reg_debug(dev_priv, reg, true, true)
+	unclaimed_reg_debug(uncore, reg, true, true)
 
 #define GEN6_READ_FOOTER \
-	unclaimed_reg_debug(dev_priv, reg, true, false); \
+	unclaimed_reg_debug(uncore, reg, true, false); \
 	spin_unlock_irqrestore(&uncore->lock, irqflags); \
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
@@ -1259,10 +1254,10 @@ __gen2_write(32)
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_rpm_wakelock_held(dev_priv); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
-	unclaimed_reg_debug(dev_priv, reg, false, true)
+	unclaimed_reg_debug(uncore, reg, false, true)
 
 #define GEN6_WRITE_FOOTER \
-	unclaimed_reg_debug(dev_priv, reg, false, false); \
+	unclaimed_reg_debug(uncore, reg, false, false); \
 	spin_unlock_irqrestore(&uncore->lock, irqflags)
 
 #define __gen6_write(x) \
@@ -1638,6 +1633,15 @@ int intel_uncore_init(struct intel_uncore *uncore)
 		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
+	if (HAS_FPGA_DBG_UNCLAIMED(i915))
+		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
+
+	if (IS_GEN_RANGE(i915, 6, 7))
+		uncore->flags |= UNCORE_HAS_FIFO;
+
 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 
 	return 0;
@@ -1869,15 +1873,14 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
 {
-	return check_for_unclaimed_mmio(dev_priv);
+	return check_for_unclaimed_mmio(uncore);
 }
 
 bool
-intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	bool ret = false;
 
 	spin_lock_irq(&uncore->lock);
@@ -1885,7 +1888,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 	if (unlikely(uncore->unclaimed_mmio_check <= 0))
 		goto out;
 
-	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
 		if (!i915_modparams.mmio_debug) {
 			DRM_DEBUG("Unclaimed register detected, "
 				  "enabling oneshot unclaimed register reporting. "
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index fa675f0f60bdf..b940d48d15fa0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -99,6 +99,9 @@ struct intel_uncore {
 
 	unsigned int flags;
 #define UNCORE_HAS_FORCEWAKE		BIT(0)
+#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
+#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
+#define UNCORE_HAS_FIFO			BIT(3)
 
 	const struct intel_forcewake_range *fw_domains_table;
 	unsigned int fw_domains_table_entries;
@@ -152,11 +155,29 @@ intel_uncore_has_forcewake(const struct intel_uncore *uncore)
 	return uncore->flags & UNCORE_HAS_FORCEWAKE;
 }
 
+static inline bool
+intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_fifo(const struct intel_uncore *uncore)
+{
+	return uncore->flags & UNCORE_HAS_FIFO;
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
 int intel_uncore_init(struct intel_uncore *uncore);
 void intel_uncore_prune(struct intel_uncore *uncore);
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
-bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
+bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
 void intel_uncore_fini(struct intel_uncore *uncore);
 void intel_uncore_suspend(struct intel_uncore *uncore);
 void intel_uncore_resume_early(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index c69a1bad89335..799aeedac91b1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -244,6 +244,7 @@ static int live_forcewake_domains(void *arg)
 {
 #define FW_RANGE 0x40000
 	struct drm_i915_private *dev_priv = arg;
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long *valid;
 	u32 offset;
 	int err;
@@ -263,31 +264,31 @@ static int live_forcewake_domains(void *arg)
 	if (!valid)
 		return -ENOMEM;
 
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
-	check_for_unclaimed_mmio(dev_priv);
+	check_for_unclaimed_mmio(uncore);
 	for (offset = 0; offset < FW_RANGE; offset += 4) {
 		i915_reg_t reg = { offset };
 
 		(void)I915_READ_FW(reg);
-		if (!check_for_unclaimed_mmio(dev_priv))
+		if (!check_for_unclaimed_mmio(uncore))
 			set_bit(offset, valid);
 	}
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
 	err = 0;
 	for_each_set_bit(offset, valid, FW_RANGE) {
 		i915_reg_t reg = { offset };
 
 		iosf_mbi_punit_acquire();
-		intel_uncore_forcewake_reset(&dev_priv->uncore);
+		intel_uncore_forcewake_reset(uncore);
 		iosf_mbi_punit_release();
 
-		check_for_unclaimed_mmio(dev_priv);
+		check_for_unclaimed_mmio(uncore);
 
 		(void)I915_READ(reg);
-		if (check_for_unclaimed_mmio(dev_priv)) {
+		if (check_for_unclaimed_mmio(uncore)) {
 			pr_err("Unclaimed mmio read to register 0x%04x\n",
 			       offset);
 			err = -EINVAL;

From eb17af67eb1e2c57cc4fff9f59982d0ce6e9c802 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:35 -0700
Subject: [PATCH 73/95] drm/i915: take a ref to the rpm in the uncore structure

Remove a bit of pointer dancing in the reg access path.
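
As a minimal sketch (illustration only, drawn from the hunks below), the
assert in the register access path goes from chasing dev_priv back out of
the uncore to a direct dereference of a pointer cached at init time:

	/* before: hop back out to dev_priv just to find runtime_pm */
	assert_rpm_device_not_suspended(uncore_to_i915(uncore));

	/* after: intel_uncore_init() caches uncore->rpm = &i915->runtime_pm */
	assert_rpm_device_not_suspended(uncore->rpm);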

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-5-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_drv.h    | 16 +++++++++++-----
 drivers/gpu/drm/i915/intel_uncore.c | 16 +++++++++-------
 drivers/gpu/drm/i915/intel_uncore.h |  3 +++
 3 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6102d21a8ba81..ab30067fe872a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2269,20 +2269,26 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices);
 
 static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *i915)
+assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
 {
-	WARN_ONCE(i915->runtime_pm.suspended,
+	WARN_ONCE(rpm->suspended,
 		  "Device suspended during HW access\n");
 }
 
 static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
 {
-	assert_rpm_device_not_suspended(i915);
-	WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
+	assert_rpm_device_not_suspended(rpm);
+	WARN_ONCE(!atomic_read(&rpm->wakeref_count),
 		  "RPM wakelock ref not held during HW access");
 }
 
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
+{
+	__assert_rpm_wakelock_held(&i915->runtime_pm);
+}
+
 /**
  * disable_rpm_wakeref_asserts - disable the RPM assert checks
  * @i915: i915 device instance
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d7ce80cb55d8b..78e4d5f2320e0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -342,7 +342,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
 	unsigned long irqflags;
 
-	assert_rpm_device_not_suspended(uncore_to_i915(uncore));
+	assert_rpm_device_not_suspended(uncore->rpm);
 
 	if (xchg(&domain->active, false))
 		return HRTIMER_RESTART;
@@ -623,7 +623,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	assert_rpm_wakelock_held(uncore_to_i915(uncore));
+	__assert_rpm_wakelock_held(uncore->rpm);
 
 	spin_lock_irqsave(&uncore->lock, irqflags);
 	__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -777,7 +777,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	assert_rpm_wakelock_held(uncore_to_i915(uncore));
+	__assert_rpm_wakelock_held(uncore->rpm);
 
 	fw_domains &= uncore->fw_domains;
 	WARN(fw_domains & ~uncore->fw_domains_active,
@@ -1095,7 +1095,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
 #define GEN2_READ_HEADER(x) \
 	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u##x val = 0; \
-	assert_rpm_wakelock_held(dev_priv);
+	__assert_rpm_wakelock_held(uncore->rpm);
 
 #define GEN2_READ_FOOTER \
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1138,7 +1138,7 @@ __gen2_read(64)
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	u##x val = 0; \
-	assert_rpm_wakelock_held(dev_priv); \
+	__assert_rpm_wakelock_held(uncore->rpm); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(uncore, reg, true, true)
 
@@ -1213,7 +1213,7 @@ __gen6_read(64)
 #define GEN2_WRITE_HEADER \
 	struct intel_uncore *uncore = &dev_priv->uncore; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	assert_rpm_wakelock_held(dev_priv); \
+	__assert_rpm_wakelock_held(uncore->rpm); \
 
 #define GEN2_WRITE_FOOTER
 
@@ -1252,7 +1252,7 @@ __gen2_write(32)
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	assert_rpm_wakelock_held(dev_priv); \
+	__assert_rpm_wakelock_held(uncore->rpm); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(uncore, reg, false, true)
 
@@ -1596,6 +1596,8 @@ int intel_uncore_init(struct intel_uncore *uncore)
 	uncore->pmic_bus_access_nb.notifier_call =
 		i915_pmic_bus_access_notifier;
 
+	uncore->rpm = &i915->runtime_pm;
+
 	if (!intel_uncore_has_forcewake(uncore)) {
 		if (IS_GEN(i915, 5)) {
 			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b940d48d15fa0..112571c3f4113 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -32,6 +32,7 @@
 #include "i915_reg.h"
 
 struct drm_i915_private;
+struct i915_runtime_pm;
 struct intel_uncore;
 
 enum forcewake_domain_id {
@@ -95,6 +96,8 @@ struct intel_forcewake_range {
 struct intel_uncore {
 	void __iomem *regs;
 
+	struct i915_runtime_pm *rpm;
+
 	spinlock_t lock; /** lock is also taken in irq contexts. */
 
 	unsigned int flags;

From a2b4abfc626b13d2f2728f11435c8f3c4b9b39d8 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:36 -0700
Subject: [PATCH 74/95] drm/i915: switch uncore mmio funcs to use intel_uncore

The full read/write ops can now work on the intel_uncore struct.
Introduce intel_uncore_read/write functions working on intel_uncore
and switch the I915_READ/WRITE macros to call them internally.

v2: no change
v3: add intel_uncore_read/write functions (Chris), update commit msg
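
An illustrative sketch, not part of the diff (it assumes a function with
dev_priv in scope, and GEN6_RC_STATE is just an example register): after
this change both forms below end up in the same uncore->funcs.mmio_readl
vfunc, so code that only holds the uncore pointer no longer needs dev_priv:

	/* legacy macro, now expands via __I915_REG_OP */
	u32 v = I915_READ(GEN6_RC_STATE);

	/* equivalent direct call on the uncore */
	u32 w = intel_uncore_read(&dev_priv->uncore, GEN6_RC_STATE);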

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-6-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h              |  49 ++++-----
 drivers/gpu/drm/i915/intel_uncore.c          |  18 ++--
 drivers/gpu/drm/i915/intel_uncore.h          | 106 +++++++++++++++++--
 drivers/gpu/drm/i915/selftests/mock_uncore.c |   4 +-
 4 files changed, 130 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 681bcbd5c06b9..b8df3eb28d35a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3476,18 +3476,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
 }
 
-#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+#define __I915_REG_OP(op__, dev_priv__, ...) \
+	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
 
-#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+#define I915_READ8(reg__)	  __I915_REG_OP(read8, dev_priv, (reg__))
+#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
 
-#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+#define I915_READ16(reg__)	   __I915_REG_OP(read16, dev_priv, (reg__))
+#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
+#define I915_READ16_NOTRACE(reg__)	   __I915_REG_OP(read16_notrace, dev_priv, (reg__))
+#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
+
+#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
+#define I915_READ_NOTRACE(reg__)	 __I915_REG_OP(read_notrace, dev_priv, (reg__))
+#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
 
 /* Be very careful with read/write 64-bit values. On 32-bit machines, they
  * will be implemented using 2 32-bit writes in an arbitrary order with
@@ -3503,20 +3506,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
  *
  * You have been warned.
  */
-#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
-#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
-	u32 upper, lower, old_upper, loop = 0;				\
-	upper = I915_READ(upper_reg);					\
-	do {								\
-		old_upper = upper;					\
-		lower = I915_READ(lower_reg);				\
-		upper = I915_READ(upper_reg);				\
-	} while (upper != old_upper && loop++ < 2);			\
-	(u64)upper << 32 | lower; })
+#define I915_READ64(reg__)	__I915_REG_OP(read64, dev_priv, (reg__))
+#define I915_READ64_2x32(lower_reg__, upper_reg__) \
+	__I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
 
-#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
-#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
+#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))
+#define POSTING_READ16(reg__)	__I915_REG_OP(posting_read16, dev_priv, (reg__))
 
 /* These are untraced mmio-accessors that are only valid to be used inside
  * critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3544,10 +3539,10 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
  * therefore generally be serialised, by either the dev_priv->uncore.lock or
  * a more localised lock guarding all access to that bank of registers.
  */
-#define I915_READ_FW(reg__) __raw_uncore_read32(&dev_priv->uncore, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_uncore_write32(&dev_priv->uncore, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_uncore_write64(&dev_priv->uncore, (reg__), (val__))
-#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
+#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
 
 /* "Broadcast RGB" property */
 #define INTEL_BROADCAST_RGB_AUTO 0
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 78e4d5f2320e0..7b7708506d647 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1093,7 +1093,6 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
 }
 
 #define GEN2_READ_HEADER(x) \
-	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u##x val = 0; \
 	__assert_rpm_wakelock_held(uncore->rpm);
 
@@ -1103,7 +1102,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
 
 #define __gen2_read(x) \
 static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
 	val = __raw_uncore_read##x(uncore, reg); \
 	GEN2_READ_FOOTER; \
@@ -1111,7 +1110,7 @@ gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 
 #define __gen5_read(x) \
 static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
 	GEN2_READ_HEADER(x); \
 	ilk_dummy_write(uncore); \
 	val = __raw_uncore_read##x(uncore, reg); \
@@ -1134,7 +1133,6 @@ __gen2_read(64)
 #undef GEN2_READ_HEADER
 
 #define GEN6_READ_HEADER(x) \
-	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	u##x val = 0; \
@@ -1178,7 +1176,7 @@ static inline void __force_wake_auto(struct intel_uncore *uncore,
 
 #define __gen_read(func, x) \
 static u##x \
-func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
 	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
@@ -1211,7 +1209,6 @@ __gen6_read(64)
 #undef GEN6_READ_HEADER
 
 #define GEN2_WRITE_HEADER \
-	struct intel_uncore *uncore = &dev_priv->uncore; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	__assert_rpm_wakelock_held(uncore->rpm); \
 
@@ -1219,7 +1216,7 @@ __gen6_read(64)
 
 #define __gen2_write(x) \
 static void \
-gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
 	__raw_uncore_write##x(uncore, reg, val); \
 	GEN2_WRITE_FOOTER; \
@@ -1227,7 +1224,7 @@ gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 
 #define __gen5_write(x) \
 static void \
-gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
 	GEN2_WRITE_HEADER; \
 	ilk_dummy_write(uncore); \
 	__raw_uncore_write##x(uncore, reg, val); \
@@ -1248,7 +1245,6 @@ __gen2_write(32)
 #undef GEN2_WRITE_HEADER
 
 #define GEN6_WRITE_HEADER \
-	struct intel_uncore *uncore = &dev_priv->uncore; \
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
@@ -1262,7 +1258,7 @@ __gen2_write(32)
 
 #define __gen6_write(x) \
 static void \
-gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
 	GEN6_WRITE_HEADER; \
 	if (NEEDS_FORCE_WAKE(offset)) \
 		__gen6_gt_wait_for_fifo(uncore); \
@@ -1272,7 +1268,7 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 
 #define __gen_write(func, x) \
 static void \
-func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
 	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 112571c3f4113..b7c3b829b3cbd 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -69,20 +69,20 @@ struct intel_uncore_funcs {
 	void (*force_wake_put)(struct intel_uncore *uncore,
 			       enum forcewake_domains domains);
 
-	u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+	u8 (*mmio_readb)(struct intel_uncore *uncore,
 			 i915_reg_t r, bool trace);
-	u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+	u16 (*mmio_readw)(struct intel_uncore *uncore,
 			  i915_reg_t r, bool trace);
-	u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+	u32 (*mmio_readl)(struct intel_uncore *uncore,
 			  i915_reg_t r, bool trace);
-	u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+	u64 (*mmio_readq)(struct intel_uncore *uncore,
 			  i915_reg_t r, bool trace);
 
-	void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+	void (*mmio_writeb)(struct intel_uncore *uncore,
 			    i915_reg_t r, u8 val, bool trace);
-	void (*mmio_writew)(struct drm_i915_private *dev_priv,
+	void (*mmio_writew)(struct intel_uncore *uncore,
 			    i915_reg_t r, u16 val, bool trace);
-	void (*mmio_writel)(struct drm_i915_private *dev_priv,
+	void (*mmio_writel)(struct intel_uncore *uncore,
 			    i915_reg_t r, u32 val, bool trace);
 };
 
@@ -275,6 +275,98 @@ __raw_write(64, q)
 #undef __raw_read
 #undef __raw_write
 
+#define __uncore_read(name__, x__, s__, trace__) \
+static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
+					   i915_reg_t reg) \
+{ \
+	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
+}
+
+#define __uncore_write(name__, x__, s__, trace__) \
+static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
+					 i915_reg_t reg, u##x__ val) \
+{ \
+	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
+}
+
+__uncore_read(read8, 8, b, true)
+__uncore_read(read16, 16, w, true)
+__uncore_read(read, 32, l, true)
+__uncore_read(read16_notrace, 16, w, false)
+__uncore_read(read_notrace, 32, l, false)
+
+__uncore_write(write8, 8, b, true)
+__uncore_write(write16, 16, w, true)
+__uncore_write(write, 32, l, true)
+__uncore_write(write_notrace, 32, l, false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * uncore->funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
+ */
+__uncore_read(read64, 64, q, true)
+
+static inline u64
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
+			 i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+	u32 upper, lower, old_upper, loop = 0;
+	upper = intel_uncore_read(uncore, upper_reg);
+	do {
+		old_upper = upper;
+		lower = intel_uncore_read(uncore, lower_reg);
+		upper = intel_uncore_read(uncore, upper_reg);
+	} while (upper != old_upper && loop++ < 2);
+	return (u64)upper << 32 | lower;
+}
+
+#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
+#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
+
+#undef __uncore_read
+#undef __uncore_write
+
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
+ * controlled.
+ *
+ * Think twice, and think again, before using these.
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&uncore->lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&uncore->lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
+#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
+#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
+#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
+
 #define raw_reg_read(base, reg) \
 	readl(base + i915_mmio_reg_offset(reg))
 #define raw_reg_write(base, reg, value) \
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index c3896c1fd5515..ff8999c63a12b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -26,14 +26,14 @@
 
 #define __nop_write(x) \
 static void \
-nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
 __nop_write(8)
 __nop_write(16)
 __nop_write(32)
 
 #define __nop_read(x) \
 static u##x \
-nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
 __nop_read(8)
 __nop_read(16)
 __nop_read(32)

From 4319382e9b1bbcf39aa6bfc89db7a431776b693a Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:37 -0700
Subject: [PATCH 75/95] drm/i915: switch intel_uncore_forcewake_for_reg to
 intel_uncore

The intel_uncore structure is the owner of FW, so subclass the
function to it.

While at it, use a local uncore var and switch to the new read/write
functions where it makes sense.
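
A minimal sketch of the new calling convention (illustration only, it
mirrors the gvt TLB-invalidate conversion in the hunks below):

	struct intel_uncore *uncore = &dev_priv->uncore;
	enum forcewake_domains fw;

	/* reg is the register about to be poked, as in the gvt hunk */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	intel_uncore_forcewake_get(uncore, fw);
	intel_uncore_write_fw(uncore, reg, 0x1);
	intel_uncore_forcewake_put(uncore, fw);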

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-7-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/gvt/mmio_context.c       | 11 +++---
 drivers/gpu/drm/i915/intel_engine_cs.c        | 21 +++++-----
 drivers/gpu/drm/i915/intel_guc.c              |  2 +-
 drivers/gpu/drm/i915/intel_pm.c               | 13 ++++---
 drivers/gpu/drm/i915/intel_uncore.c           | 38 +++++++++----------
 drivers/gpu/drm/i915/intel_uncore.h           |  2 +-
 drivers/gpu/drm/i915/intel_workarounds.c      |  2 +-
 drivers/gpu/drm/i915/selftests/intel_uncore.c |  2 +-
 8 files changed, 47 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index a00a807a1d559..76630fbe51b6d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -327,6 +327,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
@@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 * otherwise device can go to RC6 state and interrupt invalidation
 	 * process
 	 */
-	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 					    FW_REG_READ | FW_REG_WRITE);
 	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
 		fw |= FORCEWAKE_RENDER;
 
-	intel_uncore_forcewake_get(&dev_priv->uncore, fw);
+	intel_uncore_forcewake_get(uncore, fw);
 
-	I915_WRITE_FW(reg, 0x1);
+	intel_uncore_write_fw(uncore, reg, 0x1);
 
-	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
 		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
 		vgpu_vreg_t(vgpu, reg) = 0;
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, fw);
+	intel_uncore_forcewake_put(uncore, fw);
 
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index c5b4173271325..eb0fe9a58481b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -888,6 +888,7 @@ static inline u32
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 		  int subslice, i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 mcr_slice_subslice_mask;
 	u32 mcr_slice_subslice_select;
 	u32 default_mcr_s_ss_select;
@@ -909,33 +910,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 
 	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
 
-	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
 						    FW_REG_READ);
-	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
 						     GEN8_MCR_SELECTOR,
 						     FW_REG_READ | FW_REG_WRITE);
 
-	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
+	spin_lock_irq(&uncore->lock);
+	intel_uncore_forcewake_get__locked(uncore, fw_domains);
 
-	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+	mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
 
 	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
 		     default_mcr_s_ss_select);
 
 	mcr &= ~mcr_slice_subslice_mask;
 	mcr |= mcr_slice_subslice_select;
-	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
 
-	ret = I915_READ_FW(reg);
+	ret = intel_uncore_read_fw(uncore, reg);
 
 	mcr &= ~mcr_slice_subslice_mask;
 	mcr |= default_mcr_s_ss_select;
 
-	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
 
-	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_put__locked(uncore, fw_domains);
+	spin_unlock_irq(&uncore->lock);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index f2b4eaee8d526..dcb88e7b79c40 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
 
 	for (i = 0; i < guc->send_regs.count; i++) {
-		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+		fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
 					guc_send_reg(guc, i),
 					FW_REG_READ | FW_REG_WRITE);
 	}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a9fff971129e5..e5e030f4f43f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9959,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u64 time_hw, prev_hw, overflow_hw;
 	unsigned int fw_domains;
 	unsigned long flags;
@@ -9980,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
 		return 0;
 
-	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
+	spin_lock_irqsave(&uncore->lock, flags);
+	intel_uncore_forcewake_get__locked(uncore, fw_domains);
 
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -10002,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 		}
 
 		overflow_hw = BIT_ULL(32);
-		time_hw = I915_READ_FW(reg);
+		time_hw = intel_uncore_read_fw(uncore, reg);
 	}
 
 	/*
@@ -10024,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
 	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
 
-	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_put__locked(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, flags);
 
 	return mul_u64_u32_div(time_hw, mul, div);
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 7b7708506d647..8b7ef47faf2e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1841,7 +1841,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned fw =
-		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 	u32 reg_value;
 	int ret;
 
@@ -1904,23 +1904,23 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 }
 
 static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
 				i915_reg_t reg)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(dev_priv)) {
+	} else if (HAS_FWTABLE(i915)) {
 		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (INTEL_GEN(dev_priv) >= 6) {
+	} else if (INTEL_GEN(i915) >= 6) {
 		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
 	} else {
 		/* on devices with FW we expect to hit one of the above cases */
 		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(i915));
 
 		fw_domains = 0;
 	}
@@ -1931,25 +1931,25 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 }
 
 static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
 				 i915_reg_t reg)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+	} else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
 		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN(dev_priv, 8)) {
+	} else if (IS_GEN(i915, 8)) {
 		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
+	} else if (IS_GEN_RANGE(i915, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
 	} else {
 		/* on devices with FW we expect to hit one of the above cases */
 		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(i915));
 
 		fw_domains = 0;
 	}
@@ -1962,7 +1962,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 /**
  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
  * 				    a register
- * @dev_priv: pointer to struct drm_i915_private
+ * @uncore: pointer to struct intel_uncore
  * @reg: register in question
  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
  *
@@ -1974,21 +1974,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
  * callers to do FIFO management on their own or risk losing writes.
  */
 enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 			       i915_reg_t reg, unsigned int op)
 {
 	enum forcewake_domains fw_domains = 0;
 
 	WARN_ON(!op);
 
-	if (!intel_uncore_has_forcewake(&dev_priv->uncore))
+	if (!intel_uncore_has_forcewake(uncore))
 		return 0;
 
 	if (op & FW_REG_READ)
-		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
 
 	if (op & FW_REG_WRITE)
-		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
 
 	return fw_domains;
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b7c3b829b3cbd..e3c2c37c51e2e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -193,7 +193,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 
 enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 			       i915_reg_t reg, unsigned int op);
 #define FW_REG_READ  (1)
 #define FW_REG_WRITE (2)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index e758bbf506176..a04dbc58ec1cf 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -905,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
 	unsigned int i;
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+		fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
 						     wa->reg,
 						     FW_REG_READ |
 						     FW_REG_WRITE);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 799aeedac91b1..ee0bc91f76648 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -184,7 +184,7 @@ static int live_forcewake_ops(void *arg)
 		if (!engine->default_state)
 			continue;
 
-		fw_domains = intel_uncore_forcewake_for_reg(i915, mmio,
+		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
 							    FW_REG_READ);
 		if (!fw_domains)
 			continue;

From d2d551c06f81775baae3afb69fae3252271190f9 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:38 -0700
Subject: [PATCH 76/95] drm/i915: switch intel_wait_for_register_fw to uncore

The intel_uncore structure is the owner of register access, so
subclass the function to it.

While at it, use a local uncore var and switch to the new read/write
functions where it makes sense.
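
A minimal sketch of the converted wait pattern (illustration only, taken
from the intel_engine_stop_cs() hunk below):

	struct intel_uncore *uncore = &engine->i915->uncore;
	const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
	int err;

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
	err = __intel_wait_for_register_fw(uncore, mode,
					   MODE_IDLE, MODE_IDLE,
					   1000, 0, NULL);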

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-8-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_reset.c       | 36 ++++++++++++++-----------
 drivers/gpu/drm/i915/intel_dpio_phy.c   |  3 ++-
 drivers/gpu/drm/i915/intel_engine_cs.c  | 10 +++----
 drivers/gpu/drm/i915/intel_guc.c        | 11 ++++----
 drivers/gpu/drm/i915/intel_huc_fw.c     | 27 +++++++++++--------
 drivers/gpu/drm/i915/intel_i2c.c        |  2 +-
 drivers/gpu/drm/i915/intel_pm.c         |  4 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 18 ++++++-------
 drivers/gpu/drm/i915/intel_uncore.c     | 11 ++++----
 drivers/gpu/drm/i915/intel_uncore.h     | 34 ++++++++++++++---------
 10 files changed, 88 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0aea19cefe4ac..17f802a8f8f04 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -245,10 +245,12 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 			     unsigned int engine_mask,
 			     unsigned int retry)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	int ret;
 
-	I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-	ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+	intel_uncore_write_fw(uncore, ILK_GDSR,
+			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
 					   ILK_GRDOM_RESET_ENABLE, 0,
 					   5000, 0,
 					   NULL);
@@ -257,8 +259,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 		goto out;
 	}
 
-	I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-	ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+	intel_uncore_write_fw(uncore, ILK_GDSR,
+			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
 					   ILK_GRDOM_RESET_ENABLE, 0,
 					   5000, 0,
 					   NULL);
@@ -268,8 +271,8 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 	}
 
 out:
-	I915_WRITE_FW(ILK_GDSR, 0);
-	POSTING_READ_FW(ILK_GDSR);
+	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
+	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
 	return ret;
 }
 
@@ -277,6 +280,7 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 				u32 hw_domain_mask)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	int err;
 
 	/*
@@ -284,10 +288,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 	 * for fifo space for the write or forcewake the chip for
 	 * the read
 	 */
-	I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
 
 	/* Wait for the device to ack the reset requests */
-	err = __intel_wait_for_register_fw(dev_priv,
+	err = __intel_wait_for_register_fw(uncore,
 					   GEN6_GDRST, hw_domain_mask, 0,
 					   500, 0,
 					   NULL);
@@ -330,6 +334,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
 static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
 			  struct intel_engine_cs *engine)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
 	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
 	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
@@ -377,10 +382,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
 	 * ends up being locked to the engine we want to reset, we have to reset
 	 * it as well (we will unlock it once the reset sequence is completed).
 	 */
-	I915_WRITE_FW(sfc_forced_lock,
-		      I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+	intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
 
-	if (__intel_wait_for_register_fw(dev_priv,
+	if (__intel_wait_for_register_fw(uncore,
 					 sfc_forced_lock_ack,
 					 sfc_forced_lock_ack_bit,
 					 sfc_forced_lock_ack_bit,
@@ -389,7 +393,7 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
 		return sfc_reset_bit;
 
 	return 0;
@@ -465,13 +469,13 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
 
 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_uncore *uncore = &engine->i915->uncore;
 	int ret;
 
-	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
-		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+	intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
+			      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-	ret = __intel_wait_for_register_fw(dev_priv,
+	ret = __intel_wait_for_register_fw(uncore,
 					   RING_RESET_CTL(engine->mmio_base),
 					   RESET_CTL_READY_TO_RESET,
 					   RESET_CTL_READY_TO_RESET,
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 95cb8b154f879..a0591b64070b7 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -383,7 +383,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 	 * The flag should get set in 100us according to the HW team, but
 	 * use 1ms due to occasional timeouts observed with that.
 	 */
-	if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+	if (intel_wait_for_register_fw(&dev_priv->uncore,
+				       BXT_PORT_CL1CM_DW0(phy),
 				       PHY_RESERVED | PHY_POWER_GOOD,
 				       PHY_POWER_GOOD,
 				       1))
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index eb0fe9a58481b..c936c6df34e47 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -817,20 +817,20 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_uncore *uncore = &engine->i915->uncore;
 	const u32 base = engine->mmio_base;
 	const i915_reg_t mode = RING_MI_MODE(base);
 	int err;
 
-	if (INTEL_GEN(dev_priv) < 3)
+	if (INTEL_GEN(engine->i915) < 3)
 		return -ENODEV;
 
 	GEM_TRACE("%s\n", engine->name);
 
-	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
 
 	err = 0;
-	if (__intel_wait_for_register_fw(dev_priv,
+	if (__intel_wait_for_register_fw(uncore,
 					 mode, MODE_IDLE, MODE_IDLE,
 					 1000, 0,
 					 NULL)) {
@@ -839,7 +839,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 	}
 
 	/* A final mmio read to let GPU writes be hopefully flushed to memory */
-	POSTING_READ_FW(mode);
+	intel_uncore_posting_read_fw(uncore, mode);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index dcb88e7b79c40..fb9af193dbfc7 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -398,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 			u32 *response_buf, u32 response_buf_size)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 status;
 	int i;
 	int ret;
@@ -414,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
 
 	mutex_lock(&guc->send_mutex);
-	intel_uncore_forcewake_get(&dev_priv->uncore, guc->send_regs.fw_domains);
+	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
 
 	for (i = 0; i < len; i++)
-		I915_WRITE(guc_send_reg(guc, i), action[i]);
+		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
 
-	POSTING_READ(guc_send_reg(guc, i - 1));
+	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
 
 	intel_guc_notify(guc);
 
@@ -427,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	 * No GuC command should ever take longer than 10ms.
 	 * Fast commands should still complete in 10us.
 	 */
-	ret = __intel_wait_for_register_fw(dev_priv,
+	ret = __intel_wait_for_register_fw(uncore,
 					   guc_send_reg(guc, 0),
 					   INTEL_GUC_MSG_TYPE_MASK,
 					   INTEL_GUC_MSG_TYPE_RESPONSE <<
@@ -454,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	ret = INTEL_GUC_MSG_TO_DATA(status);
 
 out:
-	intel_uncore_forcewake_put(&dev_priv->uncore, guc->send_regs.fw_domains);
+	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
 	mutex_unlock(&guc->send_mutex);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 92799b8502f51..68d47c1059392 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 {
 	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
 	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long offset = 0;
 	u32 size;
 	int ret;
 
 	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
 
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	/* Set the source address for the uCode */
 	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
 		 huc_fw->header_offset;
-	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
-	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+	intel_uncore_write(uncore, DMA_ADDR_0_LOW,
+			   lower_32_bits(offset));
+	intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
+			   upper_32_bits(offset) & 0xFFFF);
 
-	/* Hardware doesn't look at destination address for HuC. Set it to 0,
+	/*
+	 * Hardware doesn't look at destination address for HuC. Set it to 0,
 	 * but still program the correct address space.
 	 */
-	I915_WRITE(DMA_ADDR_1_LOW, 0);
-	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+	intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
+	intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
 
 	size = huc_fw->header_size + huc_fw->ucode_size;
-	I915_WRITE(DMA_COPY_SIZE, size);
+	intel_uncore_write(uncore, DMA_COPY_SIZE, size);
 
 	/* Start the DMA */
-	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+	intel_uncore_write(uncore, DMA_CTRL,
+			   _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
 
 	/* Wait for DMA to finish */
-	ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
 
 	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
 
 	/* Disable the bits once DMA is over */
-	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+	intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 5a733e711355b..422685d120e94 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 	I915_WRITE_FW(GMBUS4, irq_enable);
 
-	ret = intel_wait_for_register_fw(dev_priv,
+	ret = intel_wait_for_register_fw(&dev_priv->uncore,
 					 GMBUS2, GMBUS_ACTIVE, 0,
 					 10);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e5e030f4f43f9..9a6eb2ef5f486 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9687,7 +9687,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
 	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (__intel_wait_for_register_fw(dev_priv,
+	if (__intel_wait_for_register_fw(&dev_priv->uncore,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
 					 500, 0, NULL)) {
 		DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
@@ -9735,7 +9735,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
 	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
 	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (__intel_wait_for_register_fw(dev_priv,
+	if (__intel_wait_for_register_fw(&dev_priv->uncore,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
 					 fast_timeout_us, slow_timeout_ms,
 					 NULL)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 359d6af1a0b95..5103bada89002 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2051,23 +2051,23 @@ int intel_ring_cacheline_align(struct i915_request *rq)
 
 static void gen6_bsd_submit_request(struct i915_request *request)
 {
-	struct drm_i915_private *dev_priv = request->i915;
+	struct intel_uncore *uncore = &request->i915->uncore;
 
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
        /* Every tail move must follow the sequence below */
 
 	/* Disable notification that the ring is IDLE. The GT
 	 * will then assume that it is busy and bring it out of rc6.
 	 */
-	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
-		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
 	/* Clear the context id. Here be magic! */
-	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
+	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
 
 	/* Wait for the ring not to be idle, i.e. for it to wake up. */
-	if (__intel_wait_for_register_fw(dev_priv,
+	if (__intel_wait_for_register_fw(uncore,
 					 GEN6_BSD_SLEEP_PSMI_CONTROL,
 					 GEN6_BSD_SLEEP_INDICATOR,
 					 0,
@@ -2080,10 +2080,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
 	 */
-	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
-		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 }
 
 static int mi_flush_dw(struct i915_request *rq, u32 flags)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8b7ef47faf2e4..83a8ae5c8e759 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1759,7 +1759,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 
 /**
  * __intel_wait_for_register_fw - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
  * @reg: the register to read
  * @mask: mask to apply to register value
  * @value: expected value
@@ -1783,7 +1783,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
  *
  * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
  */
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
 				 i915_reg_t reg,
 				 u32 mask,
 				 u32 value,
@@ -1792,7 +1792,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 				 u32 *out_value)
 {
 	u32 uninitialized_var(reg_value);
-#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
 	int ret;
 
 	/* Catch any overuse of this function */
@@ -1850,7 +1850,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	spin_lock_irq(&uncore->lock);
 	intel_uncore_forcewake_get__locked(uncore, fw);
 
-	ret = __intel_wait_for_register_fw(dev_priv,
+	ret = __intel_wait_for_register_fw(uncore,
 					   reg, mask, value,
 					   fast_timeout_us, 0, &reg_value);
 
@@ -1858,7 +1858,8 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&uncore->lock);
 
 	if (ret && slow_timeout_ms)
-		ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
+								       reg),
 				 (reg_value & mask) == value,
 				 slow_timeout_ms * 1000, 10, 1000);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e3c2c37c51e2e..7d83fbd8fc2db 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -220,31 +220,32 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 			      unsigned int fast_timeout_us,
 			      unsigned int slow_timeout_ms,
 			      u32 *out_value);
-static inline
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
-			    i915_reg_t reg,
-			    u32 mask,
-			    u32 value,
-			    unsigned int timeout_ms)
+static inline int
+intel_wait_for_register(struct drm_i915_private *dev_priv,
+			i915_reg_t reg,
+			u32 mask,
+			u32 value,
+			unsigned int timeout_ms)
 {
 	return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
 					 timeout_ms, NULL);
 }
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
 				 i915_reg_t reg,
 				 u32 mask,
 				 u32 value,
 				 unsigned int fast_timeout_us,
 				 unsigned int slow_timeout_ms,
 				 u32 *out_value);
-static inline
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
-			       i915_reg_t reg,
-			       u32 mask,
-			       u32 value,
+static inline int
+intel_wait_for_register_fw(struct intel_uncore *uncore,
+			   i915_reg_t reg,
+			   u32 mask,
+			   u32 value,
 			       unsigned int timeout_ms)
 {
-	return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+	return __intel_wait_for_register_fw(uncore, reg, mask, value,
 					    2, timeout_ms, NULL);
 }
 
@@ -367,6 +368,13 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
 #define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
 #define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
 
+static inline void intel_uncore_rmw_or_fw(struct intel_uncore *uncore,
+					  i915_reg_t reg, u32 or_val)
+{
+	intel_uncore_write_fw(uncore, reg,
+			      intel_uncore_read_fw(uncore, reg) | or_val);
+}
+
 #define raw_reg_read(base, reg) \
 	readl(base + i915_mmio_reg_offset(reg))
 #define raw_reg_write(base, reg, value) \

From 97a04e0d07c4b32dd76c6a03b3384170b738972f Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:39 -0700
Subject: [PATCH 77/95] drm/i915: switch intel_wait_for_register to uncore

The intel_uncore structure is the owner of register access, so
subclass the function to it.

While at it, use a local uncore var and switch to the new read/write
functions where it makes sense.
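
For reference, a minimal sketch of the conversion pattern at the call
sites (the register names and the wait helper are lifted from the hunks
that follow; the wrapper function itself is hypothetical):

	/* Hypothetical caller showing the before/after calling convention. */
	static void example_wait_for_dpll0_lock(struct drm_i915_private *dev_priv)
	{
		/* Before: the wait helpers took dev_priv directly:
		 * intel_wait_for_register(dev_priv, LCPLL1_CTL,
		 *			   LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5);
		 */

		/* After: they take the intel_uncore that owns register access. */
		if (intel_wait_for_register(&dev_priv->uncore, LCPLL1_CTL,
					    LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
			DRM_ERROR("DPLL0 not locked\n");
	}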

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-9-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c         |  2 +-
 drivers/gpu/drm/i915/i915_perf.c        | 12 ++---
 drivers/gpu/drm/i915/icl_dsi.c          |  6 ++-
 drivers/gpu/drm/i915/intel_cdclk.c      | 12 ++---
 drivers/gpu/drm/i915/intel_crt.c        |  6 +--
 drivers/gpu/drm/i915/intel_ddi.c        |  2 +-
 drivers/gpu/drm/i915/intel_display.c    | 25 +++++-----
 drivers/gpu/drm/i915/intel_dp.c         |  4 +-
 drivers/gpu/drm/i915/intel_dp_mst.c     |  2 +-
 drivers/gpu/drm/i915/intel_dpio_phy.c   |  2 +-
 drivers/gpu/drm/i915/intel_dpll_mgr.c   | 25 +++++-----
 drivers/gpu/drm/i915/intel_fbc.c        |  2 +-
 drivers/gpu/drm/i915/intel_guc.c        |  2 +-
 drivers/gpu/drm/i915/intel_hdcp.c       | 17 +++----
 drivers/gpu/drm/i915/intel_huc.c        |  2 +-
 drivers/gpu/drm/i915/intel_lvds.c       |  6 ++-
 drivers/gpu/drm/i915/intel_psr.c        |  8 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c |  7 +--
 drivers/gpu/drm/i915/intel_runtime_pm.c |  8 ++--
 drivers/gpu/drm/i915/intel_sideband.c   | 12 ++---
 drivers/gpu/drm/i915/intel_uncore.c     | 17 ++++---
 drivers/gpu/drm/i915/intel_uncore.h     |  6 +--
 drivers/gpu/drm/i915/vlv_dsi.c          | 61 +++++++++++++++----------
 drivers/gpu/drm/i915/vlv_dsi_pll.c      |  4 +-
 24 files changed, 134 insertions(+), 116 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 622b175b05fdd..ca8a8891b7b3e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2687,7 +2687,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 	if (!force_on)
 		return 0;
 
-	err = intel_wait_for_register(dev_priv,
+	err = intel_wait_for_register(&dev_priv->uncore,
 				      VLV_GTLC_SURVIVABILITY_REG,
 				      VLV_GFX_CLK_STATUS_BIT,
 				      VLV_GFX_CLK_STATUS_BIT,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 85c5cb779297d..39a4804091d70 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1920,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 
 static void gen7_oa_disable(struct i915_perf_stream *stream)
 {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct intel_uncore *uncore = &stream->dev_priv->uncore;
 
-	I915_WRITE(GEN7_OACONTROL, 0);
-	if (intel_wait_for_register(dev_priv,
+	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+	if (intel_wait_for_register(uncore,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
 				    50))
 		DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -1931,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
 
 static void gen8_oa_disable(struct i915_perf_stream *stream)
 {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct intel_uncore *uncore = &stream->dev_priv->uncore;
 
-	I915_WRITE(GEN8_OACONTROL, 0);
-	if (intel_wait_for_register(dev_priv,
+	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+	if (intel_wait_for_register(uncore,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
 				    50))
 		DRM_ERROR("wait for OA to be disabled timed out\n");
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 1395338c6772c..b67ffaa283dc4 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -861,7 +861,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
 		I915_WRITE(PIPECONF(dsi_trans), tmp);
 
 		/* wait for transcoder to be enabled */
-		if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    PIPECONF(dsi_trans),
 					    I965_PIPECONF_ACTIVE,
 					    I965_PIPECONF_ACTIVE, 10))
 			DRM_ERROR("DSI transcoder not enabled\n");
@@ -1039,7 +1040,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 		I915_WRITE(PIPECONF(dsi_trans), tmp);
 
 		/* wait for transcoder to be disabled */
-		if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    PIPECONF(dsi_trans),
 					    I965_PIPECONF_ACTIVE, 0, 50))
 			DRM_ERROR("DSI trancoder not disabled\n");
 	}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d27ccd23d753a..d40f8793718cd 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -965,7 +965,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 
 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
 				    5))
 		DRM_ERROR("DPLL0 not locked\n");
@@ -979,9 +979,9 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-	if (intel_wait_for_register(dev_priv,
-				   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
-				   1))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+				    1))
 		DRM_ERROR("Couldn't disable DPLL0\n");
 
 	dev_priv->cdclk.hw.vco = 0;
@@ -1324,7 +1324,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
 	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
 
 	/* Timeout 200us */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
 				    1))
 		DRM_ERROR("timeout waiting for DE PLL unlock\n");
@@ -1345,7 +1345,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
 	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
 
 	/* Timeout 200us */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    BXT_DE_PLL_ENABLE,
 				    BXT_DE_PLL_LOCK,
 				    BXT_DE_PLL_LOCK,
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3716b2ee362fd..50530e49982c5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -435,7 +435,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
 		I915_WRITE(crt->adpa_reg, adpa);
 
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    crt->adpa_reg,
 					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
 					    1000))
@@ -489,7 +489,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
 	I915_WRITE(crt->adpa_reg, adpa);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    crt->adpa_reg,
 				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
 				    1000)) {
@@ -542,7 +542,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 					      CRT_HOTPLUG_FORCE_DETECT,
 					      CRT_HOTPLUG_FORCE_DETECT);
 		/* wait for FORCE_DETECT to go off */
-		if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+		if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
 					    CRT_HOTPLUG_FORCE_DETECT, 0,
 					    1000))
 			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ba3de1079e63a..d33fe2952be3a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3046,7 +3046,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 	val |= DP_TP_CTL_FEC_ENABLE;
 	I915_WRITE(DP_TP_CTL(port), val);
 
-	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+	if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
 				    DP_TP_STATUS_FEC_ENABLE_LIVE,
 				    DP_TP_STATUS_FEC_ENABLE_LIVE,
 				    1))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 61c7bbd066e31..3371bf96dabdd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1040,7 +1040,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
 		i915_reg_t reg = PIPECONF(cpu_transcoder);
 
 		/* Wait for the Pipe State to go off */
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    reg, I965_PIPECONF_ACTIVE, 0,
 					    100))
 			WARN(1, "pipe_off wait timed out\n");
@@ -1346,7 +1346,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
 	POSTING_READ(DPLL(pipe));
 	udelay(150);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DPLL(pipe),
 				    DPLL_LOCK_VLV,
 				    DPLL_LOCK_VLV,
@@ -1399,7 +1399,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
 	/* Check PLL is locked */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
 				    1))
 		DRM_ERROR("PLL %d failed to lock\n", pipe);
@@ -1580,7 +1580,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 		BUG();
 	}
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    dpll_reg, port_mask, expected_mask,
 				    1000))
 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@@ -1641,7 +1641,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
 	}
 
 	I915_WRITE(reg, val | TRANS_ENABLE);
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
 				    100))
 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
@@ -1671,7 +1671,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 		val |= TRANS_PROGRESSIVE;
 
 	I915_WRITE(LPT_TRANSCONF, val);
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    LPT_TRANSCONF,
 				    TRANS_STATE_ENABLE,
 				    TRANS_STATE_ENABLE,
@@ -1697,7 +1697,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
 	val &= ~TRANS_ENABLE;
 	I915_WRITE(reg, val);
 	/* wait for PCH transcoder off, transcoder state */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    reg, TRANS_STATE_ENABLE, 0,
 				    50))
 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
@@ -1719,7 +1719,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 	val &= ~TRANS_ENABLE;
 	I915_WRITE(LPT_TRANSCONF, val);
 	/* wait for PCH transcoder off, transcoder state */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
 				    50))
 		DRM_ERROR("Failed to disable PCH transcoder\n");
@@ -5299,7 +5299,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 		 * and don't wait for vblanks until the end of crtc_enable, then
 		 * the HW state readout code will complain that the expected
 		 * IPS_CTL value is not the one we read. */
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
 					    50))
 			DRM_ERROR("Timed out waiting for IPS enable\n");
@@ -5324,7 +5324,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 		 * 42ms timeout value leads to occasional timeouts so use 100ms
 		 * instead.
 		 */
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    IPS_CTL, IPS_ENABLE, 0,
 					    100))
 			DRM_ERROR("Timed out waiting for IPS disable\n");
@@ -9481,7 +9481,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 	I915_WRITE(LCPLL_CTL, val);
 	POSTING_READ(LCPLL_CTL);
 
-	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
 		DRM_ERROR("LCPLL still locked\n");
 
 	val = hsw_read_dcomp(dev_priv);
@@ -9536,7 +9537,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	val &= ~LCPLL_PLL_DISABLE;
 	I915_WRITE(LCPLL_CTL, val);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
 				    5))
 		DRM_ERROR("LCPLL not locked yet\n");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 326de12c3f44a..74fe14103459a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2348,7 +2348,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
 			I915_READ(pp_stat_reg),
 			I915_READ(pp_ctrl_reg));
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    pp_stat_reg, mask, value,
 				    5000))
 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@@ -3937,7 +3937,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 	if (port == PORT_A)
 		return;
 
-	if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
+	if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
 				    DP_TP_STATUS_IDLE_DONE,
 				    DP_TP_STATUS_IDLE_DONE,
 				    1))
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fb67cd9311178..ecad1f58ebf8c 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -289,7 +289,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DP_TP_STATUS(port),
 				    DP_TP_STATUS_ACT_SENT,
 				    DP_TP_STATUS_ACT_SENT,
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index a0591b64070b7..db295c77ff0dd 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -341,7 +341,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
 				  enum dpio_phy phy)
 {
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    BXT_PORT_REF_DW3(phy),
 				    GRC_DONE, GRC_DONE,
 				    10))
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index eeb659946203c..e01c057ce50be 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -960,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(regs[id].ctl,
 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DPLL_STATUS,
 				    DPLL_LOCK(id),
 				    DPLL_LOCK(id),
@@ -1977,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    CNL_DPLL_ENABLE(id),
 				    PLL_POWER_STATE,
 				    PLL_POWER_STATE,
@@ -2018,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    CNL_DPLL_ENABLE(id),
 				    PLL_LOCK,
 				    PLL_LOCK,
@@ -2066,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    CNL_DPLL_ENABLE(id),
 				    PLL_LOCK,
 				    0,
@@ -2088,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    CNL_DPLL_ENABLE(id),
 				    PLL_POWER_STATE,
 				    0,
@@ -3050,8 +3050,8 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
 	 * The spec says we need to "wait" but it also says it should be
 	 * immediate.
 	 */
-	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
-				    PLL_POWER_STATE, 1))
+	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+				    PLL_POWER_STATE, PLL_POWER_STATE, 1))
 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
 }
 
@@ -3066,8 +3066,8 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(enable_reg, val);
 
 	/* Timeout is actually 600us. */
-	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
-				    1))
+	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+				    PLL_LOCK, PLL_LOCK, 1))
 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
 }
 
@@ -3149,7 +3149,8 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 	I915_WRITE(enable_reg, val);
 
 	/* Timeout is actually 1us. */
-	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    enable_reg, PLL_LOCK, 0, 1))
 		DRM_ERROR("PLL %d locked\n", pll->info->id);
 
 	/* DVFS post sequence would be here. See the comment above. */
@@ -3162,8 +3163,8 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 	 * The spec says we need to "wait" but it also says it should be
 	 * immediate.
 	 */
-	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
-				    1))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    enable_reg, PLL_POWER_STATE, 0, 1))
 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 656e684e7c9a3..43fe08be3b7d8 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -108,7 +108,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
 				    10)) {
 		DRM_DEBUG_KMS("FBC idle timed out\n");
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index fb9af193dbfc7..3aabfa2d9198e 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -565,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc,
 	if (ret)
 		return ret;
 
-	ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+	ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
 					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
 					0, 0, 10, &status);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 9ce09f67776d1..86965fa377390 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -225,7 +225,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	}
 
 	/* Wait for the keys to load (500us) */
-	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
 					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
 					10, 1, &val);
 	if (ret)
@@ -243,7 +243,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
 {
 	I915_WRITE(HDCP_SHA_TEXT, sha_text);
-	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
 				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
 		DRM_ERROR("Timed out waiting for SHA1 ready\n");
 		return -ETIMEDOUT;
@@ -474,7 +474,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
 
 	/* Tell the HW we're done with the hash and wait for it to ACK */
 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
-	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
 				    HDCP_SHA1_COMPLETE,
 				    HDCP_SHA1_COMPLETE, 1)) {
 		DRM_ERROR("Timed out waiting for SHA1 complete\n");
@@ -604,7 +604,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
 
 	/* Wait for An to be acquired */
-	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
 				    HDCP_STATUS_AN_READY,
 				    HDCP_STATUS_AN_READY, 1)) {
 		DRM_ERROR("Timed out waiting for An\n");
@@ -685,7 +685,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 	}
 
 	/* Wait for encryption confirmation */
-	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
 				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
 				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		DRM_ERROR("Timed out waiting for encryption\n");
@@ -717,7 +717,8 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
 
 	hdcp->hdcp_encrypted = false;
 	I915_WRITE(PORT_HDCP_CONF(port), 0);
-	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    PORT_HDCP_STATUS(port), ~0, 0,
 				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
 		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
 		return -ETIMEDOUT;
@@ -1477,7 +1478,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
 			   CTL_LINK_ENCRYPTION_REQ);
 	}
 
-	ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
+	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
 				      LINK_ENCRYPTION_STATUS,
 				      LINK_ENCRYPTION_STATUS,
 				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1498,7 +1499,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
 	I915_WRITE(HDCP2_CTL_DDI(port),
 		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
 
-	ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
+	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
 				      LINK_ENCRYPTION_STATUS, 0x0,
 				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
 	if (ret == -ETIMEDOUT)
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 9bd1c9002c2ad..94c04f16a2add 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc)
 	}
 
 	/* Check authentication status, it should be done by now */
-	ret = __intel_wait_for_register(i915,
+	ret = __intel_wait_for_register(&i915->uncore,
 					HUC_STATUS2,
 					HUC_FW_VERIFIED,
 					HUC_FW_VERIFIED,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ccd5ac6d3fcd5..34dd2d71814bb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -311,7 +311,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
 	POSTING_READ(lvds_encoder->reg);
 
-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    PP_STATUS(0), PP_ON, PP_ON, 5000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(pipe_config, conn_state);
@@ -325,7 +326,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    PP_STATUS(0), PP_ON, 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
 
 	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 605fe8fc85cca..ec874d802d48b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -834,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	}
 
 	/* Wait till PSR is idle */
-	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
-				    2000))
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    psr_status, psr_status_mask, 0, 2000))
 		DRM_ERROR("Timed out waiting PSR idle state\n");
 
 	/* Disable PSR on Sink */
@@ -956,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 	 * defensive enough to cover everything.
 	 */
 
-	return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+	return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
 					 EDP_PSR_STATUS_STATE_MASK,
 					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
 					 out_value);
@@ -981,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 
 	mutex_unlock(&dev_priv->psr.lock);
 
-	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+	err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
 	if (err)
 		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5103bada89002..a5b4a2d9a4929 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -586,7 +586,7 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
 	I915_WRITE(instpm,
 		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
 				      INSTPM_SYNC_FLUSH));
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    instpm, INSTPM_SYNC_FLUSH, 0,
 				    1000))
 		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
@@ -607,7 +607,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) > 2) {
 		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    RING_MI_MODE(engine->mmio_base),
 					    MODE_IDLE,
 					    MODE_IDLE,
@@ -699,7 +699,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    RING_CTL(engine->mmio_base),
 				    RING_VALID, RING_VALID,
 				    50)) {
 		DRM_ERROR("%s initialization failed "
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 676a89bb81943..40ddfbb97acbd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -565,7 +565,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 	int pw_idx = power_well->desc->hsw.idx;
 
 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
-	WARN_ON(intel_wait_for_register(dev_priv,
+	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
 					regs->driver,
 					HSW_PWR_WELL_CTL_STATE(pw_idx),
 					HSW_PWR_WELL_CTL_STATE(pw_idx),
@@ -620,7 +620,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 					   enum skl_power_gate pg)
 {
 	/* Timeout 5us for PG#0, for other PGs 1us */
-	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
 					SKL_FUSE_PG_DIST_STATUS(pg),
 					SKL_FUSE_PG_DIST_STATUS(pg), 1));
 }
@@ -1521,7 +1521,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
 	 * The PHY may be busy with some initial calibration and whatnot,
 	 * so the power state can take a while to actually change.
 	 */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DISPLAY_PHY_STATUS,
 				    phy_status_mask,
 				    phy_status,
@@ -1556,7 +1556,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	vlv_set_power_well(dev_priv, power_well, true);
 
 	/* Poll for phypwrgood signal */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    DISPLAY_PHY_STATUS,
 				    PHY_POWERGOOD(phy),
 				    PHY_POWERGOOD(phy),
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 75c872bb8cc9d..57de41b1f9892 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
 				    5)) {
 		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 	I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
 	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
 				    5)) {
 		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
@@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 	u32 value = 0;
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    SBI_CTL_STAT, SBI_BUSY, 0,
 				    100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
 	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    SBI_CTL_STAT,
 				    SBI_BUSY,
 				    0,
@@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    SBI_CTL_STAT, SBI_BUSY, 0,
 				    100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
 	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    SBI_CTL_STAT,
 				    SBI_BUSY,
 				    0,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 83a8ae5c8e759..35d9ce33bf83b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1831,15 +1831,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
  *
  * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
  */
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
-			    i915_reg_t reg,
-			    u32 mask,
-			    u32 value,
-			    unsigned int fast_timeout_us,
-			    unsigned int slow_timeout_ms,
-			    u32 *out_value)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
+int __intel_wait_for_register(struct intel_uncore *uncore,
+			      i915_reg_t reg,
+			      u32 mask,
+			      u32 value,
+			      unsigned int fast_timeout_us,
+			      unsigned int slow_timeout_ms,
+			      u32 *out_value)
+{
 	unsigned fw =
 		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 	u32 reg_value;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 7d83fbd8fc2db..06762501e9eee 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -213,7 +213,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
 
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct intel_uncore *uncore,
 			      i915_reg_t reg,
 			      u32 mask,
 			      u32 value,
@@ -221,13 +221,13 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 			      unsigned int slow_timeout_ms,
 			      u32 *out_value);
 static inline int
-intel_wait_for_register(struct drm_i915_private *dev_priv,
+intel_wait_for_register(struct intel_uncore *uncore,
 			i915_reg_t reg,
 			u32 mask,
 			u32 value,
 			unsigned int timeout_ms)
 {
-	return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+	return __intel_wait_for_register(uncore, reg, mask, value, 2,
 					 timeout_ms, NULL);
 }
 
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 01278b2e257c6..0a950c976bbb6 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -78,7 +78,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 	mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
 		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    MIPI_GEN_FIFO_STAT(port), mask, mask,
 				    100))
 		DRM_ERROR("DPI FIFOs are not empty\n");
@@ -148,7 +148,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 
 	/* note: this is never true for reads */
 	if (packet.payload_length) {
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    MIPI_GEN_FIFO_STAT(port),
 					    data_mask, 0,
 					    50))
@@ -162,7 +162,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
 	}
 
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    MIPI_GEN_FIFO_STAT(port),
 				    ctrl_mask, 0,
 				    50)) {
@@ -174,7 +174,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 	/* ->rx_len is set only for reads */
 	if (msg->rx_len) {
 		data_mask = GEN_READ_DATA_AVAIL;
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    MIPI_INTR_STAT(port),
 					    data_mask, data_mask,
 					    50))
@@ -234,7 +234,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
 	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
 
 	mask = SPL_PKT_SENT_INTERRUPT;
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    MIPI_INTR_STAT(port), mask, mask,
 				    100))
 		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@@ -353,16 +353,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
 
 	/* Wait for Pwr ACK */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
-				MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
-				GLK_MIPIIO_PORT_POWERED, 20))
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    MIPI_CTRL(port),
+					    GLK_MIPIIO_PORT_POWERED,
+					    GLK_MIPIIO_PORT_POWERED,
+					    20))
 			DRM_ERROR("MIPIO port is powergated\n");
 	}
 
 	/* Check for cold boot scenario */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
-							DEVICE_READY);
+		cold_boot |=
+			!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
 	}
 
 	return cold_boot;
@@ -377,9 +379,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 
 	/* Wait for MIPI PHY status bit to set */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
-				MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
-				GLK_PHY_STATUS_PORT_READY, 20))
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    MIPI_CTRL(port),
+					    GLK_PHY_STATUS_PORT_READY,
+					    GLK_PHY_STATUS_PORT_READY,
+					    20))
 			DRM_ERROR("PHY is not ON\n");
 	}
 
@@ -403,8 +407,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 			I915_WRITE(MIPI_DEVICE_READY(port), val);
 
 			/* Wait for ULPS active */
-			if (intel_wait_for_register(dev_priv,
-				MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+			if (intel_wait_for_register(&dev_priv->uncore,
+						    MIPI_CTRL(port),
+						    GLK_ULPS_NOT_ACTIVE,
+						    0,
+						    20))
 				DRM_ERROR("ULPS not active\n");
 
 			/* Exit ULPS */
@@ -427,17 +434,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 
 	/* Wait for Stop state */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
-				MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
-				GLK_DATA_LANE_STOP_STATE, 20))
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    MIPI_CTRL(port),
+					    GLK_DATA_LANE_STOP_STATE,
+					    GLK_DATA_LANE_STOP_STATE,
+					    20))
 			DRM_ERROR("Date lane not in STOP state\n");
 	}
 
 	/* Wait for AFE LATCH */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
-				BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
-				AFE_LATCHOUT, 20))
+		if (intel_wait_for_register(&dev_priv->uncore,
+					    BXT_MIPI_PORT_CTRL(port),
+					    AFE_LATCHOUT,
+					    AFE_LATCHOUT,
+					    20))
 			DRM_ERROR("D-PHY not entering LP-11 state\n");
 	}
 }
@@ -537,7 +548,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
 
 	/* Wait for MIPI PHY status bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    MIPI_CTRL(port),
 					    GLK_PHY_STATUS_PORT_READY, 0, 20))
 			DRM_ERROR("PHY is not turning OFF\n");
@@ -545,7 +556,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
 
 	/* Wait for Pwr ACK bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    MIPI_CTRL(port),
 					    GLK_MIPIIO_PORT_POWERED, 0, 20))
 			DRM_ERROR("MIPI IO Port is not powergated\n");
@@ -566,7 +577,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
 
 	/* Wait for MIPI PHY status bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (intel_wait_for_register(dev_priv,
+		if (intel_wait_for_register(&dev_priv->uncore,
 					    MIPI_CTRL(port),
 					    GLK_PHY_STATUS_PORT_READY, 0, 20))
 			DRM_ERROR("PHY is not turning OFF\n");
@@ -616,7 +627,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
 		 * Port A only. MIPI Port C has no similar bit for checking.
 		 */
 		if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
-		    intel_wait_for_register(dev_priv,
+		    intel_wait_for_register(&dev_priv->uncore,
 					    port_ctrl, AFE_LATCHOUT, 0,
 					    30))
 			DRM_ERROR("DSI LP not going Low\n");
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 954d5a8c4fa76..5e7b1fb2db5db 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
 	 * PLL lock should deassert within 200us.
 	 * Wait up to 1ms before timing out.
 	 */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    BXT_DSI_PLL_ENABLE,
 				    BXT_DSI_PLL_LOCKED,
 				    0,
@@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 	I915_WRITE(BXT_DSI_PLL_ENABLE, val);
 
 	/* Timeout and fail if PLL not locked */
-	if (intel_wait_for_register(dev_priv,
+	if (intel_wait_for_register(&dev_priv->uncore,
 				    BXT_DSI_PLL_ENABLE,
 				    BXT_DSI_PLL_LOCKED,
 				    BXT_DSI_PLL_LOCKED,

From baba6e572b38ecd399048cb5b03db88e67069902 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Mon, 25 Mar 2019 14:49:40 -0700
Subject: [PATCH 78/95] drm/i915: take a reference to uncore in the engine and
 use it

A few advantages:

- Prepares us for the planned split of display uncore from GT uncore

- Improves our engine-centric view of the world in the engine code
  and allows us to avoid jumping back to dev_priv.

- Allows us to wrap accesses to engine registers in nice macros that
  automatically pick the right mmio base (see the sketch below).
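
As a concrete illustration, a minimal sketch of the accessor change
(the macro and register names are taken from the hunks below; the
wrapper function itself is hypothetical):

	/* Hypothetical helper showing the new engine-centric accessors. */
	static void example_set_hwstam(struct intel_engine_cs *engine, u32 mask)
	{
		/* Before: open-coded mmio_base arithmetic via dev_priv:
		 * I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
		 */

		/* After: ENGINE_WRITE() resolves engine->uncore and
		 * engine->mmio_base automatically.
		 */
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	}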

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-10-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/gvt/handlers.c       |   2 +-
 drivers/gpu/drm/i915/i915_debugfs.c       |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c     |  42 +++---
 drivers/gpu/drm/i915/i915_reg.h           |  16 +--
 drivers/gpu/drm/i915/i915_reset.c         |  13 +-
 drivers/gpu/drm/i915/intel_engine_cs.c    | 134 ++++++++++---------
 drivers/gpu/drm/i915/intel_engine_types.h |   2 +
 drivers/gpu/drm/i915/intel_hangcheck.c    |   4 +-
 drivers/gpu/drm/i915/intel_lrc.c          |  21 ++-
 drivers/gpu/drm/i915/intel_lrc.h          |  22 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c   | 156 +++++++++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.h   |  45 +++++--
 drivers/gpu/drm/i915/intel_uncore.c       |   2 +-
 13 files changed, 243 insertions(+), 218 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b596cb42e24eb..dbc7496179226 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1848,7 +1848,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
 
 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
-	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47bf07a59b5e1..bb2c16c439ea6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -880,7 +880,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		for_each_engine(engine, dev_priv, id) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
-				   engine->name, I915_READ_IMR(engine));
+				   engine->name, ENGINE_READ(engine, RING_IMR));
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a9557f92756f6..a2a98ccda4217 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1136,7 +1136,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
 		if (INTEL_GEN(dev_priv) >= 8)
 			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
 		else
@@ -1144,32 +1144,32 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	}
 
 	if (INTEL_GEN(dev_priv) >= 4) {
-		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
-		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
-		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
-		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
+		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
+		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
+		ee->instps = ENGINE_READ(engine, RING_INSTPS);
+		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
 		if (INTEL_GEN(dev_priv) >= 8) {
-			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
-			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
+			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
 		}
-		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
 	} else {
-		ee->faddr = I915_READ(DMA_FADD_I8XX);
-		ee->ipeir = I915_READ(IPEIR);
-		ee->ipehr = I915_READ(IPEHR);
+		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
+		ee->ipeir = ENGINE_READ(engine, IPEIR);
+		ee->ipehr = ENGINE_READ(engine, IPEHR);
 	}
 
 	intel_engine_get_instdone(engine, &ee->instdone);
 
-	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
 	ee->acthd = intel_engine_get_active_head(engine);
-	ee->start = I915_READ_START(engine);
-	ee->head = I915_READ_HEAD(engine);
-	ee->tail = I915_READ_TAIL(engine);
-	ee->ctl = I915_READ_CTL(engine);
+	ee->start = ENGINE_READ(engine, RING_START);
+	ee->head = ENGINE_READ(engine, RING_HEAD);
+	ee->tail = ENGINE_READ(engine, RING_TAIL);
+	ee->ctl = ENGINE_READ(engine, RING_CTL);
 	if (INTEL_GEN(dev_priv) > 2)
-		ee->mode = I915_READ_MODE(engine);
+		ee->mode = ENGINE_READ(engine, RING_MI_MODE);
 
 	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
 		i915_reg_t mmio;
@@ -1214,10 +1214,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
 		if (IS_GEN(dev_priv, 6))
 			ee->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(engine));
+				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
 		else if (IS_GEN(dev_priv, 7))
 			ee->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(engine));
+					ENGINE_READ(engine, RING_PP_DIR_BASE);
 		else if (INTEL_GEN(dev_priv) >= 8)
 			for (i = 0; i < 4; i++) {
 				ee->vm_info.pdp[i] =
@@ -1601,7 +1601,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
 	}
 
 	if (INTEL_GEN(dev_priv) >= 5)
-		error->ccid = I915_READ(CCID);
+		error->ccid = I915_READ(CCID(RENDER_RING_BASE));
 
 	/* 3: Feature specific registers */
 	if (IS_GEN_RANGE(dev_priv, 6, 7)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b46910453e612..362f483f8ab2d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -434,9 +434,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN11_VECS_SFC_USAGE(engine)		_MMIO((engine)->mmio_base + 0x2014)
 #define   GEN11_VECS_SFC_USAGE_BIT		(1 << 0)
 
-#define RING_PP_DIR_BASE(engine)	_MMIO((engine)->mmio_base + 0x228)
-#define RING_PP_DIR_BASE_READ(engine)	_MMIO((engine)->mmio_base + 0x518)
-#define RING_PP_DIR_DCLV(engine)	_MMIO((engine)->mmio_base + 0x220)
+#define RING_PP_DIR_BASE(base)		_MMIO((base) + 0x228)
+#define RING_PP_DIR_BASE_READ(base)	_MMIO((base) + 0x518)
+#define RING_PP_DIR_DCLV(base)		_MMIO((base) + 0x220)
 #define   PP_DIR_DCLV_2G		0xffffffff
 
 #define GEN8_RING_PDP_UDW(engine, n)	_MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
@@ -2568,12 +2568,12 @@ enum i915_power_well_id {
 #define HWS_START_ADDRESS_SHIFT	4
 #define PWRCTXA		_MMIO(0x2088) /* 965GM+ only */
 #define   PWRCTX_EN	(1 << 0)
-#define IPEIR		_MMIO(0x2088)
-#define IPEHR		_MMIO(0x208c)
+#define IPEIR(base)	_MMIO((base) + 0x88)
+#define IPEHR(base)	_MMIO((base) + 0x8c)
 #define GEN2_INSTDONE	_MMIO(0x2090)
 #define NOPID		_MMIO(0x2094)
 #define HWSTAM		_MMIO(0x2098)
-#define DMA_FADD_I8XX	_MMIO(0x20d0)
+#define DMA_FADD_I8XX(base)	_MMIO((base) + 0xd0)
 #define RING_BBSTATE(base)	_MMIO((base) + 0x110)
 #define   RING_BB_PPGTT		(1 << 5)
 #define RING_SBBADDR(base)	_MMIO((base) + 0x114) /* hsw+ */
@@ -2747,7 +2747,7 @@ enum i915_power_well_id {
 #define   INSTPM_FORCE_ORDERING				(1 << 7) /* GEN6+ */
 #define   INSTPM_TLB_INVALIDATE	(1 << 9)
 #define   INSTPM_SYNC_FLUSH	(1 << 5)
-#define ACTHD	        _MMIO(0x20c8)
+#define ACTHD(base)	_MMIO((base) + 0xc8)
 #define MEM_MODE	_MMIO(0x20cc)
 #define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
 #define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@@ -3947,7 +3947,7 @@ enum i915_power_well_id {
 /*
  * Logical Context regs
  */
-#define CCID				_MMIO(0x2180)
+#define CCID(base)			_MMIO((base) + 0x180)
 #define   CCID_EN			BIT(0)
 #define   CCID_EXTENDED_STATE_RESTORE	BIT(2)
 #define   CCID_EXTENDED_STATE_SAVE	BIT(3)
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 17f802a8f8f04..2f25ed702ba04 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -1173,19 +1173,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
+static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
+{
+	I915_WRITE(reg, I915_READ(reg));
+}
+
 void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 {
 	u32 eir;
 
 	if (!IS_GEN(dev_priv, 2))
-		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+		clear_register(dev_priv, PGTBL_ER);
 
 	if (INTEL_GEN(dev_priv) < 4)
-		I915_WRITE(IPEIR, I915_READ(IPEIR));
+		clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
 	else
-		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+		clear_register(dev_priv, IPEIR_I965);
 
-	I915_WRITE(EIR, I915_READ(EIR));
+	clear_register(dev_priv, EIR);
 	eir = I915_READ(EIR);
 	if (eir) {
 		/*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index c936c6df34e47..d0427c2e39972 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -255,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
 
 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	i915_reg_t hwstam;
-
 	/*
 	 * Though they added more rings on g4x/ilk, they did not add
 	 * per-engine HWSTAM until gen6.
 	 */
-	if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
 		return;
 
-	hwstam = RING_HWSTAM(engine->mmio_base);
-	if (INTEL_GEN(dev_priv) >= 3)
-		I915_WRITE(hwstam, mask);
+	if (INTEL_GEN(engine->i915) >= 3)
+		ENGINE_WRITE(engine, RING_HWSTAM, mask);
 	else
-		I915_WRITE16(hwstam, mask);
+		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
 }
 
 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@@ -309,6 +305,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->id = id;
 	engine->mask = BIT(id);
 	engine->i915 = dev_priv;
+	engine->uncore = &dev_priv->uncore;
 	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
 	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
@@ -787,37 +784,35 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *i915 = engine->i915;
+
 	u64 acthd;
 
-	if (INTEL_GEN(dev_priv) >= 8)
-		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
-					 RING_ACTHD_UDW(engine->mmio_base));
-	else if (INTEL_GEN(dev_priv) >= 4)
-		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+	if (INTEL_GEN(i915) >= 8)
+		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+	else if (INTEL_GEN(i915) >= 4)
+		acthd = ENGINE_READ(engine, RING_ACTHD);
 	else
-		acthd = I915_READ(ACTHD);
+		acthd = ENGINE_READ(engine, ACTHD);
 
 	return acthd;
 }
 
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	u64 bbaddr;
 
-	if (INTEL_GEN(dev_priv) >= 8)
-		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
-					  RING_BBADDR_UDW(engine->mmio_base));
+	if (INTEL_GEN(engine->i915) >= 8)
+		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
 	else
-		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+		bbaddr = ENGINE_READ(engine, RING_BBADDR);
 
 	return bbaddr;
 }
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
-	struct intel_uncore *uncore = &engine->i915->uncore;
+	struct intel_uncore *uncore = engine->uncore;
 	const u32 base = engine->mmio_base;
 	const i915_reg_t mode = RING_MI_MODE(base);
 	int err;
@@ -846,12 +841,9 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
 	GEM_TRACE("%s\n", engine->name);
 
-	I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
-		      _MASKED_BIT_DISABLE(STOP_RING));
+	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@@ -946,6 +938,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_uncore *uncore = engine->uncore;
 	u32 mmio_base = engine->mmio_base;
 	int slice;
 	int subslice;
@@ -954,12 +947,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 
 	switch (INTEL_GEN(dev_priv)) {
 	default:
-		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+		instdone->instdone =
+			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 
 		if (engine->id != RCS0)
 			break;
 
-		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+		instdone->slice_common =
+			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
 		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
 			instdone->sampler[slice][subslice] =
 				read_subslice_reg(dev_priv, slice, subslice,
@@ -970,28 +965,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 		}
 		break;
 	case 7:
-		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+		instdone->instdone =
+			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 
 		if (engine->id != RCS0)
 			break;
 
-		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
-		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
-		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+		instdone->slice_common =
+			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+		instdone->sampler[0][0] =
+			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+		instdone->row[0][0] =
+			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
 
 		break;
 	case 6:
 	case 5:
 	case 4:
-		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
-
+		instdone->instdone =
+			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
 		if (engine->id == RCS0)
 			/* HACK: Using the wrong struct member */
-			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+			instdone->slice_common =
+				intel_uncore_read(uncore, GEN4_INSTDONE1);
 		break;
 	case 3:
 	case 2:
-		instdone->instdone = I915_READ(GEN2_INSTDONE);
+		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
 		break;
 	}
 }
@@ -1011,12 +1011,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 		return true;
 
 	/* First check that no commands are left in the ring */
-	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
-	    (I915_READ_TAIL(engine) & TAIL_ADDR))
+	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
 		idle = false;
 
 	/* No bit for gen2, so assume the CS parser is idle */
-	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+	if (INTEL_GEN(dev_priv) > 2 &&
+	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
 		idle = false;
 
 	intel_runtime_pm_put(dev_priv, wakeref);
@@ -1332,24 +1333,25 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 	u64 addr;
 
 	if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
-		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
+		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
 	drm_printf(m, "\tRING_START: 0x%08x\n",
-		   I915_READ(RING_START(engine->mmio_base)));
+		   ENGINE_READ(engine, RING_START));
 	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
-		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
 	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
-		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
 	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
-		   I915_READ(RING_CTL(engine->mmio_base)),
-		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+		   ENGINE_READ(engine, RING_CTL),
+		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
 	if (INTEL_GEN(engine->i915) > 2) {
 		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
-			   I915_READ(RING_MI_MODE(engine->mmio_base)),
-			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+			   ENGINE_READ(engine, RING_MI_MODE),
+			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
 	}
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+		drm_printf(m, "\tRING_IMR: %08x\n",
+			   ENGINE_READ(engine, RING_IMR));
 	}
 
 	addr = intel_engine_get_active_head(engine);
@@ -1359,22 +1361,21 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
 	if (INTEL_GEN(dev_priv) >= 8)
-		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
-					RING_DMA_FADD_UDW(engine->mmio_base));
+		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
 	else if (INTEL_GEN(dev_priv) >= 4)
-		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		addr = ENGINE_READ(engine, RING_DMA_FADD);
 	else
-		addr = I915_READ(DMA_FADD_I8XX);
+		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
 	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
 	if (INTEL_GEN(dev_priv) >= 4) {
 		drm_printf(m, "\tIPEIR: 0x%08x\n",
-			   I915_READ(RING_IPEIR(engine->mmio_base)));
+			   ENGINE_READ(engine, RING_IPEIR));
 		drm_printf(m, "\tIPEHR: 0x%08x\n",
-			   I915_READ(RING_IPEHR(engine->mmio_base)));
+			   ENGINE_READ(engine, RING_IPEHR));
 	} else {
-		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
-		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
 	}
 
 	if (HAS_EXECLISTS(dev_priv)) {
@@ -1384,15 +1385,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 		u8 read, write;
 
 		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
-			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
-			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
 
 		read = execlists->csb_head;
 		write = READ_ONCE(*execlists->csb_write);
 
 		drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
 			   read, write,
-			   GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
+			   GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)),
 			   yesno(test_bit(TASKLET_STATE_SCHED,
 					  &engine->execlists.tasklet.state)),
 			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1407,9 +1408,13 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
 				   idx,
 				   hws[idx * 2],
-				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+				   ENGINE_READ_IDX(engine,
+						   RING_CONTEXT_STATUS_BUF_LO,
+						   idx),
 				   hws[idx * 2 + 1],
-				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+				   ENGINE_READ_IDX(engine,
+						   RING_CONTEXT_STATUS_BUF_HI,
+						   idx));
 		}
 
 		rcu_read_lock();
@@ -1436,11 +1441,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 		rcu_read_unlock();
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE(engine)));
+			   ENGINE_READ(engine, RING_PP_DIR_BASE));
 		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
 		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
-			   I915_READ(RING_PP_DIR_DCLV(engine)));
+			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
 	}
 }
 
@@ -1687,8 +1692,7 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 
 static bool match_ring(struct i915_request *rq)
 {
-	struct drm_i915_private *dev_priv = rq->i915;
-	u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
+	u32 ring = ENGINE_READ(rq->engine, RING_START);
 
 	return ring == i915_ggtt_offset(rq->ring->vma);
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
index 88ed7ba8886fc..b3249bf6a65ff 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -29,6 +29,7 @@ struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
+struct intel_uncore;
 
 struct intel_hw_status_page {
 	struct i915_vma *vma;
@@ -250,6 +251,7 @@ struct intel_engine_execlists {
 
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
+	struct intel_uncore *uncore;
 	char name[INTEL_ENGINE_CS_MAX_NAME];
 
 	enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 125662c649341..59232df11ada7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 * and break the hang. This should work on
 	 * all but the second generation chipsets.
 	 */
-	tmp = I915_READ_CTL(engine);
+	tmp = ENGINE_READ(engine, RING_CTL);
 	if (tmp & RING_WAIT) {
 		i915_handle_error(dev_priv, engine->mask, 0,
 				  "stuck wait on %s", engine->name);
-		I915_WRITE_CTL(engine, tmp);
+		ENGINE_WRITE(engine, RING_CTL, tmp);
 		return ENGINE_WAIT_KICK;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 66bc3cd4e1669..3870215061997 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2074,16 +2074,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	I915_WRITE_IMR(engine,
-		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
-	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+	ENGINE_WRITE(engine, RING_IMR,
+		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	ENGINE_POSTING_READ(engine, RING_IMR);
 }
 
 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct i915_request *request, u32 mode)
@@ -2288,7 +2286,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 	}
 
 	if (engine->cleanup)
@@ -2400,6 +2398,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
+	u32 base = engine->mmio_base;
 	int ret;
 
 	ret = intel_engine_init_common(engine);
@@ -2410,12 +2409,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 
 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
 		execlists->submit_reg = i915->uncore.regs +
-			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
+			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
 		execlists->ctrl_reg = i915->uncore.regs +
-			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+			i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
 	} else {
 		execlists->submit_reg = i915->uncore.regs +
-			i915_mmio_reg_offset(RING_ELSP(engine));
+			i915_mmio_reg_offset(RING_ELSP(base));
 	}
 
 	execlists->preempt_complete_status = ~0u;
@@ -2658,7 +2657,7 @@ static void execlists_init_reg_state(u32 *regs,
 	regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
 				 MI_LRI_FORCE_POSTED;
 
-	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
 		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
 	if (INTEL_GEN(engine->i915) < 11) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f1aec8a6986fd..92642ab914722 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -28,20 +28,20 @@
 #include "i915_gem_context.h"
 
 /* Execlists regs */
-#define RING_ELSP(engine)			_MMIO((engine)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(engine)		_MMIO((engine)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(engine)		_MMIO((engine)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(engine)		_MMIO((engine)->mmio_base + 0x244)
+#define RING_ELSP(base)				_MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base)		_MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base)		_MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base)		_MMIO((base) + 0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
-#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
+#define   CTX_CTRL_RS_CTX_ENABLE		(1 << 1)
 #define	  CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT	(1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(engine)	_MMIO((engine)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(engine, i)	_MMIO((engine)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(engine, i)	_MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(engine)		_MMIO((engine)->mmio_base + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(engine)	_MMIO((engine)->mmio_base + 0x510)
-#define RING_EXECLIST_CONTROL(engine)		_MMIO((engine)->mmio_base + 0x550)
+#define RING_CONTEXT_STATUS_BUF_BASE(base)	_MMIO((base) + 0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(base, i)	_MMIO((base) + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(base, i)	_MMIO((base) + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(base)		_MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base)		_MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base)		_MMIO((base) + 0x550)
 #define	  EL_CTRL_LOAD				(1 << 0)
 
 /* The docs specify that the write pointer wraps around after 5h, "After status
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a5b4a2d9a4929..48ba4d61a4ae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -575,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
 static void flush_cs_tlb(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
 
 	if (!IS_GEN_RANGE(dev_priv, 6, 7))
 		return;
 
 	/* ring should be idle before issuing a sync flush*/
-	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
-
-	I915_WRITE(instpm,
-		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-				      INSTPM_SYNC_FLUSH));
-	if (intel_wait_for_register(&dev_priv->uncore,
-				    instpm, INSTPM_SYNC_FLUSH, 0,
+	WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+	ENGINE_WRITE(engine, RING_INSTPM,
+		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					INSTPM_SYNC_FLUSH));
+	if (intel_wait_for_register(engine->uncore,
+				    RING_INSTPM(engine->mmio_base),
+				    INSTPM_SYNC_FLUSH, 0,
 				    1000))
 		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
 			  engine->name);
@@ -606,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (INTEL_GEN(dev_priv) > 2) {
-		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-		if (intel_wait_for_register(&dev_priv->uncore,
+		ENGINE_WRITE(engine,
+			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
+		if (intel_wait_for_register(engine->uncore,
 					    RING_MI_MODE(engine->mmio_base),
 					    MODE_IDLE,
 					    MODE_IDLE,
 					    1000)) {
 			DRM_ERROR("%s : timed out trying to stop ring\n",
 				  engine->name);
-			/* Sometimes we observe that the idle flag is not
+
+			/*
+			 * Sometimes we observe that the idle flag is not
 			 * set even though the ring is empty. So double
 			 * check before giving up.
 			 */
-			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
+			if (ENGINE_READ(engine, RING_HEAD) !=
+			    ENGINE_READ(engine, RING_TAIL))
 				return false;
 		}
 	}
 
-	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
 
-	I915_WRITE_HEAD(engine, 0);
-	I915_WRITE_TAIL(engine, 0);
+	ENGINE_WRITE(engine, RING_HEAD, 0);
+	ENGINE_WRITE(engine, RING_TAIL, 0);
 
 	/* The ring must be empty before it is disabled */
-	I915_WRITE_CTL(engine, 0);
+	ENGINE_WRITE(engine, RING_CTL, 0);
 
-	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
+	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
 }
 
 static int init_ring_common(struct intel_engine_cs *engine)
@@ -640,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	struct intel_ring *ring = engine->buffer;
 	int ret = 0;
 
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 
 	if (!stop_ring(engine)) {
 		/* G45 ring initialization often fails to reset head to zero */
 		DRM_DEBUG_DRIVER("%s head not reset to zero "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				engine->name,
-				I915_READ_CTL(engine),
-				I915_READ_HEAD(engine),
-				I915_READ_TAIL(engine),
-				I915_READ_START(engine));
+				ENGINE_READ(engine, RING_CTL),
+				ENGINE_READ(engine, RING_HEAD),
+				ENGINE_READ(engine, RING_TAIL),
+				ENGINE_READ(engine, RING_START));
 
 		if (!stop_ring(engine)) {
 			DRM_ERROR("failed to set %s head to zero "
 				  "ctl %08x head %08x tail %08x start %08x\n",
 				  engine->name,
-				  I915_READ_CTL(engine),
-				  I915_READ_HEAD(engine),
-				  I915_READ_TAIL(engine),
-				  I915_READ_START(engine));
+				  ENGINE_READ(engine, RING_CTL),
+				  ENGINE_READ(engine, RING_HEAD),
+				  ENGINE_READ(engine, RING_TAIL),
+				  ENGINE_READ(engine, RING_START));
 			ret = -EIO;
 			goto out;
 		}
@@ -673,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	intel_engine_reset_breadcrumbs(engine);
 
 	/* Enforce ordering by reading HEAD register back */
-	I915_READ_HEAD(engine);
+	ENGINE_READ(engine, RING_HEAD);
 
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
+	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
 
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
-	if (I915_READ_HEAD(engine))
+	if (ENGINE_READ(engine, RING_HEAD))
 		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
-				 engine->name, I915_READ_HEAD(engine));
+				 engine->name, ENGINE_READ(engine, RING_HEAD));
 
 	/* Check that the ring offsets point within the ring! */
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -692,43 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	intel_ring_update_space(ring);
 
 	/* First wake the ring up to an empty/idle ring */
-	I915_WRITE_HEAD(engine, ring->head);
-	I915_WRITE_TAIL(engine, ring->head);
-	(void)I915_READ_TAIL(engine);
+	ENGINE_WRITE(engine, RING_HEAD, ring->head);
+	ENGINE_WRITE(engine, RING_TAIL, ring->head);
+	ENGINE_POSTING_READ(engine, RING_TAIL);
 
-	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
+	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (intel_wait_for_register(&dev_priv->uncore,
+	if (intel_wait_for_register(engine->uncore,
 				    RING_CTL(engine->mmio_base),
 				    RING_VALID, RING_VALID,
 				    50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 			  engine->name,
-			  I915_READ_CTL(engine),
-			  I915_READ_CTL(engine) & RING_VALID,
-			  I915_READ_HEAD(engine), ring->head,
-			  I915_READ_TAIL(engine), ring->tail,
-			  I915_READ_START(engine),
+			  ENGINE_READ(engine, RING_CTL),
+			  ENGINE_READ(engine, RING_CTL) & RING_VALID,
+			  ENGINE_READ(engine, RING_HEAD), ring->head,
+			  ENGINE_READ(engine, RING_TAIL), ring->tail,
+			  ENGINE_READ(engine, RING_START),
 			  i915_ggtt_offset(ring->vma));
 		ret = -EIO;
 		goto out;
 	}
 
 	if (INTEL_GEN(dev_priv) > 2)
-		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+		ENGINE_WRITE(engine,
+			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 
 	/* Now awake, let it get started */
 	if (ring->tail != ring->head) {
-		I915_WRITE_TAIL(engine, ring->tail);
-		(void)I915_READ_TAIL(engine);
+		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+		ENGINE_POSTING_READ(engine, RING_TAIL);
 	}
 
 	/* Papering over lost _interrupts_ immediately following the restart */
 	intel_engine_queue_breadcrumbs(engine);
 out:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -869,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (INTEL_GEN(dev_priv) >= 6)
-		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+		ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
 
 	return 0;
 }
@@ -896,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
 
 static void i9xx_submit_request(struct i915_request *request)
 {
-	struct drm_i915_private *dev_priv = request->i915;
-
 	i915_request_submit(request);
 
-	I915_WRITE_TAIL(request->engine,
-			intel_ring_set_tail(request->ring, request->tail));
+	ENGINE_WRITE(request->engine, RING_TAIL,
+		     intel_ring_set_tail(request->ring, request->tail));
 }
 
 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -973,20 +976,20 @@ gen5_irq_disable(struct intel_engine_cs *engine)
 static void
 i9xx_irq_enable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	GEM_BUG_ON(engine->id != RCS0);
 
-	dev_priv->irq_mask &= ~engine->irq_enable_mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
-	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+	engine->i915->irq_mask &= ~engine->irq_enable_mask;
+	ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
+	ENGINE_POSTING_READ(engine, RING_IMR);
 }
 
 static void
 i9xx_irq_disable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	GEM_BUG_ON(engine->id != RCS0);
 
-	dev_priv->irq_mask |= engine->irq_enable_mask;
-	I915_WRITE(IMR, dev_priv->irq_mask);
+	engine->i915->irq_mask |= engine->irq_enable_mask;
+	ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
 }
 
 static void
@@ -1026,47 +1029,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	I915_WRITE_IMR(engine,
-		       ~(engine->irq_enable_mask |
-			 engine->irq_keep_mask));
+	ENGINE_WRITE(engine, RING_IMR,
+		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
 
 	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
-	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+	ENGINE_POSTING_READ(engine, RING_IMR);
 
-	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
+	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 gen6_irq_disable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
+	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
 
 	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
-	POSTING_READ_FW(RING_IMR(engine->mmio_base));
+	ENGINE_POSTING_READ(engine, RING_IMR);
 
-	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
+	gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	I915_WRITE_IMR(engine, ~0);
-	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
+	ENGINE_WRITE(engine, RING_IMR, ~0);
+	gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static int
@@ -1577,7 +1571,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
-		(I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
 	intel_ring_unpin(engine->buffer);
 	intel_ring_put(engine->buffer);
@@ -1612,11 +1606,11 @@ static int load_pd_dir(struct i915_request *rq,
 		return PTR_ERR(cs);
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
-	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
 	*cs++ = PP_DIR_DCLV_2G;
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
-	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
 	*cs++ = ppgtt->pd.base.ggtt_offset << 10;
 
 	intel_ring_advance(rq, cs);
@@ -1635,7 +1629,7 @@ static int flush_pd_dir(struct i915_request *rq)
 
 	/* Stall until the page table load is complete */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
 	*cs++ = i915_scratch_offset(rq->i915);
 	*cs++ = MI_NOOP;
 
@@ -2052,7 +2046,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
 
 static void gen6_bsd_submit_request(struct i915_request *request)
 {
-	struct intel_uncore *uncore = &request->i915->uncore;
+	struct intel_uncore *uncore = request->engine->uncore;
 
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a02c92dac5da1..e58d6f04177b7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -29,23 +29,44 @@ struct drm_printer;
 #define CACHELINE_BYTES 64
 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
-#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
-#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
+
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
+
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
 
-#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
-#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
+#define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
 
-#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
-#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+	__ENGINE_REG_OP(read64_2x32, (engine__), \
+			lower_reg__((engine__)->mmio_base), \
+			upper_reg__((engine__)->mmio_base))
 
-#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
-#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
 
-#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
-#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
 
-#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
-#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
+#define ENGINE_WRITE16(...)	__ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...)	__ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...)	__ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
 
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 35d9ce33bf83b..5c80704bf283d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1814,7 +1814,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
 
 /**
  * __intel_wait_for_register - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
  * @reg: the register to read
  * @mask: mask to apply to register value
  * @value: expected value

From e163484afa8d6b1775cd35201fe7d523a45e0409 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Tue, 26 Mar 2019 18:02:18 +0100
Subject: [PATCH 79/95] drm/i915: Update size upon return from GEM_CREATE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since GEM_CREATE tries to outsmart the user by rounding up unaligned
objects, we used to update the size returned to userspace to match.
This update seems to have been lost somewhere along the driver's history.
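
As a rough userspace-side illustration (a sketch only, not part of this
patch; it assumes fd is an already-open i915 DRM fd and uses the libdrm
drmIoctl() helper), the rounded-up size now becomes visible to the caller:

    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    struct drm_i915_gem_create create = {
            .size = 4000,   /* deliberately not page aligned */
    };

    /* with this patch the kernel writes the rounded-up size back */
    drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    /* create.size now reflects the page-aligned allocation, e.g. 4096 */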

v2: Use round_up(), reorder locals (Chris)

References: ff72145badb8 ("drm: dumb scanout create/mmap for intel/radeon (v3)")
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Janusz Krzysztofik <janusz.krzysztofik@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326170218.13255-1-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6cdd5fb9deb9..e506e43cfade9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -622,14 +622,15 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 static int
 i915_gem_create(struct drm_file *file,
 		struct drm_i915_private *dev_priv,
-		u64 size,
+		u64 *size_p,
 		u32 *handle_p)
 {
 	struct drm_i915_gem_object *obj;
-	int ret;
 	u32 handle;
+	u64 size;
+	int ret;
 
-	size = roundup(size, PAGE_SIZE);
+	size = round_up(*size_p, PAGE_SIZE);
 	if (size == 0)
 		return -EINVAL;
 
@@ -645,6 +646,7 @@ i915_gem_create(struct drm_file *file,
 		return ret;
 
 	*handle_p = handle;
+	*size_p = obj->base.size;
 	return 0;
 }
 
@@ -657,7 +659,7 @@ i915_gem_dumb_create(struct drm_file *file,
 	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
 	args->size = args->pitch * args->height;
 	return i915_gem_create(file, to_i915(dev),
-			       args->size, &args->handle);
+			       &args->size, &args->handle);
 }
 
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -682,7 +684,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 	i915_gem_flush_free_objects(dev_priv);
 
 	return i915_gem_create(file, dev_priv,
-			       args->size, &args->handle);
+			       &args->size, &args->handle);
 }
 
 static inline enum fb_op_origin

From 9511cb6481af8c903f7649f82706ae908c224383 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 26 Mar 2019 18:00:07 +0000
Subject: [PATCH 80/95] drm/i915: Adding missing '; ' to ENGINE_INSTANCES

Tvrtko spotted that I left off the trailing ';'. It went unnoticed by CI
because despite adding the macro, we didn't add a user, so include one as
well (a simple debug print).
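
For reference, a minimal sketch (a made-up macro, not the driver one, but
the same shape as ENGINE_INSTANCES_MASK, assuming GENMASK() from
<linux/bits.h>) of why the trailing ';' matters: the value of a GNU
statement expression ({ ... }) is that of its last statement, so the final
expression must be terminated before the closing brace or any user of the
macro fails to compile:

    #define LOW_BITS(x, n) ({ \
            unsigned long x__ = (x); \
            unsigned long n__ = (n); \
            x__ & GENMASK(n__ - 1, 0); /* ';' required here */ \
    })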

Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fixes: 97ee6e925552 ("drm/i915: stop storing the media fuse")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326180007.11722-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h          | 2 +-
 drivers/gpu/drm/i915/intel_device_info.c | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b8df3eb28d35a..b05687ed91ef7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2458,7 +2458,7 @@ static inline unsigned int i915_sg_segment_size(void)
 	unsigned int first__ = (first);					\
 	unsigned int count__ = (count);					\
 	(INTEL_INFO(dev_priv)->engine_mask &				\
-	 GENMASK(first__ + count__ - 1, first__)) >> first__		\
+	 GENMASK(first__ + count__ - 1, first__)) >> first__;		\
 })
 #define VDBOX_MASK(dev_priv) \
 	ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 5776a0def7ac9..e0f5e0231d045 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -890,7 +890,6 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
 		      GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-	DRM_DEBUG_DRIVER("vdbox enable: %04x\n", vdbox_mask);
 	for (i = 0; i < I915_MAX_VCS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VCS(i)))
 			continue;
@@ -908,8 +907,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 		if (logical_vdbox++ % 2 == 0)
 			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
 	}
+	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
+			 vdbox_mask, VDBOX_MASK(dev_priv));
+	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
 
-	DRM_DEBUG_DRIVER("vebox enable: %04x\n", vebox_mask);
 	for (i = 0; i < I915_MAX_VECS; i++) {
 		if (!HAS_ENGINE(dev_priv, _VECS(i)))
 			continue;
@@ -919,4 +920,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
 		}
 	}
+	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
+			 vebox_mask, VEBOX_MASK(dev_priv));
+	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
 }

From 98721b84bc8319ffcbc450ca7de032702248b2ba Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Tue, 26 Mar 2019 16:38:17 -0700
Subject: [PATCH 81/95] drm/i915: fix i386 build of 64b raw_uncore functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When building with ARCH=i386, readq and writeq are not defined,
resulting in:

intel_uncore.h: In function ‘__raw_uncore_read64’:
intel_uncore.h:257:9: error: implicit declaration of function ‘readq’;
	did you mean ‘readl’? [-Werror=implicit-function-declaration]
  return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
         ^

and:

intel_uncore.h: In function ‘__raw_uncore_write64’:
intel_uncore.h:264:2: error: implicit declaration of function ‘writeq’;
	did you mean ‘writel’? [-Werror=implicit-function-declaration]
  write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
  ^

Add the io-64-nonatomic-lo-hi include to have readq and writeq available
for all builds. This header internally includes linux/io.h, so the
native readq and writeq definitions will be used when available.
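
For the curious, the lo-hi read fallback is roughly the following (a
simplified sketch of what include/linux/io-64-nonatomic-lo-hi.h provides
when the architecture lacks a native readq; the real header also covers
the write side and the hi-lo ordering variant):

    static inline u64 lo_hi_readq(const volatile void __iomem *addr)
    {
            const volatile u32 __iomem *p = addr;
            u32 low, high;

            /* two 32-bit reads, low dword first, stitched into a u64 */
            low = readl(p);
            high = readl(p + 1);

            return low + ((u64)high << 32);
    }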

Fixes: 6cc5ca768825 ("drm/i915: rename raw reg access functions")
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326233817.5417-1-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_uncore.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 06762501e9eee..50d226f687538 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "i915_reg.h"
 

From 547fcf9b1c608cf5c43c156a8773a94c6a38dc44 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Tue, 26 Mar 2019 16:02:23 -0700
Subject: [PATCH 82/95] drm/i915/icl: Fix VEBOX mismatch BUG_ON()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

GT VEBOX DISABLE is only 4 bits wide, but we were using an 8-bit-wide
mask. The remaining reserved bits read back as 0, causing 4 nonexistent
VEBOX engines to be detected as enabled and triggering the BUG_ON()
because of the mismatch between vebox_mask and the newly added
VEBOX_MASK().

[   64.081621] [drm:intel_device_info_init_mmio [i915]] vdbox enable: 0005, instances: 0005
[   64.081763] [drm:intel_device_info_init_mmio [i915]] vebox enable: 00f1, instances: 0001
[   64.081825] intel_device_info_init_mmio:925 GEM_BUG_ON(vebox_mask != ({ unsigned int first__ = (VECS0); unsigned int count__ = (2); ((&(dev_priv)->__info)->engine_mask & (((~0UL) - (1UL << (first__)) + 1) & (~0UL >> (64 - 1 - (first__ + count__ - 1))))) >> first__; }))
[   64.082047] ------------[ cut here ]------------
[   64.082054] kernel BUG at drivers/gpu/drm/i915/intel_device_info.c:925!
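
Worked through by hand (a sketch using the values from the log above,
after the fuse register has been inverted into an "enabled" view):

    bits [19:16] (VEBOX disable): 0b1110   only VECS0 present
    bits [23:20] (reserved):      0b0000

    old 0xff mask: ~0b00001110 & 0xff = 0b11110001 = 0x00f1  4 phantom engines
    new 0x0f mask: ~0b00001110 & 0x0f = 0b00000001 = 0x0001  matches VEBOX_MASK()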

BSpec: 20680
Fixes: 9511cb6481af ("drm/i915: Adding missing '; ' to ENGINE_INSTANCES")
Fixes: 26376a7e74d2 ("drm/i915/icl: Check for fused-off VDBOX and VEBOX instances")
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326230223.26336-1-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 362f483f8ab2d..c866379a521bc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2953,7 +2953,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE	_MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK	0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
-#define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK	(0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF

From 37aa52bff2bcd4377c0f747f730f3d436f2fe58e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 26 Mar 2019 16:25:51 +0200
Subject: [PATCH 83/95] drm/i915: Add broadcast RGB property for DP MST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add the "Broadcast RGB" property to MST connectors, and implement
the same logic for it as we have in the SST code.

v2: Extract and reuse intel_dp_limited_color_range()

Cc: Ivan Vlk <ari@adyline.sk>
Tested-by: Ivan Vlk <ari@adyline.sk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108821
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20190326142556.21176-1-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_dp.c     | 39 ++++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_dp_mst.c | 15 +++++++++--
 drivers/gpu/drm/i915/intel_drv.h    |  2 ++
 3 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 74fe14103459a..352dca9a9d510 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2107,6 +2107,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	return 0;
 }
 
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+				  const struct drm_connector_state *conn_state)
+{
+	const struct intel_digital_connector_state *intel_conn_state =
+		to_intel_digital_connector_state(conn_state);
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->base.adjusted_mode;
+
+	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+		/*
+		 * See:
+		 * CEA-861-E - 5.1 Default Encoding Parameters
+		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+		 */
+		return crtc_state->pipe_bpp != 18 &&
+			drm_default_rgb_quant_range(adjusted_mode) ==
+			HDMI_QUANTIZATION_RANGE_LIMITED;
+	} else {
+		return intel_conn_state->broadcast_rgb ==
+			INTEL_BROADCAST_RGB_LIMITED;
+	}
+}
+
 int
 intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_state *pipe_config,
@@ -2175,20 +2198,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (ret < 0)
 		return ret;
 
-	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
-		/*
-		 * See:
-		 * CEA-861-E - 5.1 Default Encoding Parameters
-		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
-		 */
-		pipe_config->limited_color_range =
-			pipe_config->pipe_bpp != 18 &&
-			drm_default_rgb_quant_range(adjusted_mode) ==
-			HDMI_QUANTIZATION_RANGE_LIMITED;
-	} else {
-		pipe_config->limited_color_range =
-			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
-	}
+	pipe_config->limited_color_range =
+		intel_dp_limited_color_range(pipe_config, conn_state);
 
 	if (!pipe_config->dsc_params.compression_enable)
 		intel_link_compute_m_n(pipe_config->pipe_bpp,
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index ecad1f58ebf8c..3e45420c6af04 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -77,6 +77,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
 		pipe_config->has_audio = true;
 
+	pipe_config->limited_color_range =
+		intel_dp_limited_color_range(pipe_config, conn_state);
+
 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 	pipe_config->pbn = mst_pbn;
 
@@ -117,7 +120,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
 	struct drm_crtc *new_crtc = new_conn_state->crtc;
 	struct drm_crtc_state *crtc_state;
 	struct drm_dp_mst_topology_mgr *mgr;
-	int ret = 0;
+	int ret;
+
+	ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+	if (ret)
+		return ret;
 
 	if (!old_conn_state->crtc)
 		return 0;
@@ -354,11 +361,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.atomic_get_property = intel_digital_connector_atomic_get_property,
+	.atomic_set_property = intel_digital_connector_atomic_set_property,
 	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
 
 static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -487,6 +496,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	if (ret)
 		goto err;
 
+	intel_attach_broadcast_rgb_property(connector);
+
 	return connector;
 
 err:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ab30067fe872a..58701a2621211 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1908,6 +1908,8 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+				  const struct drm_connector_state *conn_state);
 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
 			   i915_reg_t dp_reg, enum port port,
 			   enum pipe *pipe);

From 765bdb0b3948747dda8c9bab300013e34f76fe88 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Wed, 27 Mar 2019 15:19:29 +0200
Subject: [PATCH 84/95] drm/i915: Expose the force_audio property with DP MST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We already expose the force_audio property with SST. Do the same
with MST.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326142556.21176-2-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_dp_mst.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e45420c6af04..880003c8576bd 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -38,6 +38,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_connector *connector = conn_state->connector;
+	struct intel_digital_connector_state *intel_conn_state =
+		to_intel_digital_connector_state(conn_state);
 	void *port = to_intel_connector(connector)->port;
 	struct drm_atomic_state *state = pipe_config->base.state;
 	struct drm_crtc *crtc = pipe_config->base.crtc;
@@ -62,6 +64,14 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 		DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
 			      bpp);
 	}
+
+	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+		pipe_config->has_audio =
+			drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+	else
+		pipe_config->has_audio =
+			intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
 	/*
 	 * for MST we always configure max link bw - the spec doesn't
 	 * seem to suggest we should do otherwise.
@@ -74,9 +84,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 	pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
 
-	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
-		pipe_config->has_audio = true;
-
 	pipe_config->limited_color_range =
 		intel_dp_limited_color_range(pipe_config, conn_state);
 
@@ -496,6 +503,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	if (ret)
 		goto err;
 
+	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
 
 	return connector;

From f1477219869cc943ef9e0248b308da7b900c65ec Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 26 Mar 2019 16:25:53 +0200
Subject: [PATCH 85/95] drm/i915: Remove the 8bpc shackles from DP MST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow DP MST to output any color depth. This means both deep color and
falling back to 6bpc when we would otherwise require too much
bandwidth.

TODO: We should probably extend the bw_constrained scheme to force
all streams on the link to 6bpc if we can't fit the new stream(s)
otherwise.

v2: Use a proper for-loop (Jani)

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20190326142556.21176-3-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_dp.c     |   8 +-
 drivers/gpu/drm/i915/intel_dp_mst.c | 117 +++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_drv.h    |   8 ++
 3 files changed, 80 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 352dca9a9d510..72c49070ed14c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1723,12 +1723,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 	}
 }
 
-struct link_config_limits {
-	int min_clock, max_clock;
-	int min_lane_count, max_lane_count;
-	int min_bpp, max_bpp;
-};
-
 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
 					 const struct intel_crtc_state *pipe_config)
 {
@@ -1791,7 +1785,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 }
 
 /* Adjust link config limits based on compliance test requests. */
-static void
+void
 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
 				  struct intel_crtc_state *pipe_config,
 				  struct link_config_limits *limits)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 880003c8576bd..8796fbd654134 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -29,41 +29,78 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
 
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+					    struct intel_crtc_state *crtc_state,
+					    struct drm_connector_state *conn_state,
+					    struct link_config_limits *limits)
+{
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_dp *intel_dp = &intel_mst->primary->dp;
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->base.adjusted_mode;
+	void *port = connector->port;
+	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+					   DP_DPCD_QUIRK_CONSTANT_N);
+	int bpp, slots = -EINVAL;
+
+	crtc_state->lane_count = limits->max_lane_count;
+	crtc_state->port_clock = limits->max_clock;
+
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+		crtc_state->pipe_bpp = bpp;
+
+		crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+						       crtc_state->pipe_bpp);
+
+		slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
+						      port, crtc_state->pbn);
+		if (slots == -EDEADLK)
+			return slots;
+		if (slots >= 0)
+			break;
+	}
+
+	if (slots < 0) {
+		DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+		return slots;
+	}
+
+	intel_link_compute_m_n(crtc_state->pipe_bpp,
+			       crtc_state->lane_count,
+			       adjusted_mode->crtc_clock,
+			       crtc_state->port_clock,
+			       &crtc_state->dp_m_n,
+			       constant_n);
+	crtc_state->dp_m_n.tu = slots;
+
+	return 0;
+}
+
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 				       struct intel_crtc_state *pipe_config,
 				       struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_connector *connector = conn_state->connector;
+	struct intel_dp *intel_dp = &intel_mst->primary->dp;
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
-	void *port = to_intel_connector(connector)->port;
-	struct drm_atomic_state *state = pipe_config->base.state;
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_crtc_state *old_crtc_state =
-		drm_atomic_get_old_crtc_state(state, crtc);
-	int bpp;
-	int lane_count, slots =
-		to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
-	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int mst_pbn;
-	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
-					   DP_DPCD_QUIRK_CONSTANT_N);
+	const struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+	void *port = connector->port;
+	struct link_config_limits limits;
+	int ret;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return -EINVAL;
 
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->has_pch_encoder = false;
-	bpp = 24;
-	if (intel_dp->compliance.test_data.bpc) {
-		bpp = intel_dp->compliance.test_data.bpc * 3;
-		DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
-			      bpp);
-	}
 
 	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
 		pipe_config->has_audio =
@@ -76,36 +113,25 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	 * for MST we always configure max link bw - the spec doesn't
 	 * seem to suggest we should do otherwise.
 	 */
-	lane_count = intel_dp_max_lane_count(intel_dp);
+	limits.min_clock =
+	limits.max_clock = intel_dp_max_link_rate(intel_dp);
 
-	pipe_config->lane_count = lane_count;
+	limits.min_lane_count =
+	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
 
-	pipe_config->pipe_bpp = bpp;
+	limits.min_bpp = 6 * 3;
+	limits.max_bpp = pipe_config->pipe_bpp;
 
-	pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
+	ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
+					       conn_state, &limits);
+	if (ret)
+		return ret;
 
 	pipe_config->limited_color_range =
 		intel_dp_limited_color_range(pipe_config, conn_state);
 
-	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
-	pipe_config->pbn = mst_pbn;
-
-	slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
-					      mst_pbn);
-	if (slots < 0) {
-		DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
-			      slots);
-		return slots;
-	}
-
-	intel_link_compute_m_n(bpp, lane_count,
-			       adjusted_mode->crtc_clock,
-			       pipe_config->port_clock,
-			       &pipe_config->dp_m_n,
-			       constant_n);
-
-	pipe_config->dp_m_n.tu = slots;
-
 	if (IS_GEN9_LP(dev_priv))
 		pipe_config->lane_lat_optim_mask =
 			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -389,7 +415,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_dp *intel_dp = intel_connector->mst_port;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-	int bpp = 24; /* MST uses fixed bpp */
 	int max_rate, mode_rate, max_lanes, max_link_clock;
 
 	if (drm_connector_is_unregistered(connector))
@@ -402,7 +427,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	max_lanes = intel_dp_max_lane_count(intel_dp);
 
 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-	mode_rate = intel_dp_link_required(mode->clock, bpp);
+	mode_rate = intel_dp_link_required(mode->clock, 18);
 
 	/* TODO - validate mode against available PBN for link */
 	if (mode->clock < 10000)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 58701a2621211..6aab82d8d9c57 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1908,6 +1908,14 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
+struct link_config_limits {
+	int min_clock, max_clock;
+	int min_lane_count, max_lane_count;
+	int min_bpp, max_bpp;
+};
+void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+				       struct intel_crtc_state *pipe_config,
+				       struct link_config_limits *limits);
 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state);
 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,

From 5ca0ef8a56b8c4812ed78ef9ca53052191dab6e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 26 Mar 2019 16:25:54 +0200
Subject: [PATCH 86/95] drm/i915: Add max_bpc property for DP MST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow the user to limit the output bpc with DP MST.
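
A hedged usage sketch, not part of this patch: once the property is
attached, userspace can clamp the MST output through the standard
"max bpc" connector property, e.g. with libdrm (the property id lookup
is elided; 8 bpc is just an example value):

  #include <stdint.h>
  #include <xf86drmMode.h>

  /* Cap an MST connector at 8 bpc; max_bpc_prop_id must have been
   * looked up from the connector's "max bpc" property beforehand. */
  static int cap_output_bpc(int fd, uint32_t connector_id,
                            uint32_t max_bpc_prop_id)
  {
          return drmModeObjectSetProperty(fd, connector_id,
                                          DRM_MODE_OBJECT_CONNECTOR,
                                          max_bpc_prop_id, 8);
  }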

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326142556.21176-4-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_dp_mst.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 8796fbd654134..19d81cef2ab6f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -530,6 +530,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
+	drm_connector_attach_max_bpc_property(connector, 6, 12);
 
 	return connector;
 

From 5aa2c9ae9371326d9ee9199fe5dee072cc16c6a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 26 Mar 2019 16:25:55 +0200
Subject: [PATCH 87/95] drm/i915: Update TRANS_MSA_MISC for fastsets
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Update the DP MSA MISC bits for fastsets. This is needed
when we change between limited and full range RGB output.

On HSW+ changing limited_range does not currently result in a full
modeset since we don't have the readout code for it (for DP we could,
and probably should, read it back from TRANS_MSA_MISC itself; for HDMI
we would have to rely on the infoframe). So the PIPE_CONF_CHECK() is
only performed for pre-HSW platforms, which means any change in the
value results in a fastset instead. Fortunately there is no
prohibition against changing TRANS_MSA_MISC dynamically, so it looks
like we can legally do fastsets for this.
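
For context, a simplified sketch (not the exact upstream guard or
macro usage) of the compare behaviour described above; on HSW+ the
mismatch falls through to the fastset path, which after this patch
rewrites TRANS_MSA_MISC via intel_ddi_set_pipe_settings():

  /* sketch of intel_pipe_config_compare(), simplified */
  if (INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv))  /* pre-HSW */
          PIPE_CONF_CHECK_BOOL(limited_color_range);
  /* HSW+: no check, so a limited<->full range change is a fastset */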

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190326142556.21176-5-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d33fe2952be3a..3f1e491bd0c0a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3524,6 +3524,8 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
+	intel_ddi_set_pipe_settings(crtc_state);
+
 	intel_psr_update(intel_dp, crtc_state);
 	intel_edp_drrs_enable(intel_dp, crtc_state);
 

From a2deb87396b0903d30ce73af6fc8fc82e9e32c62 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Date: Wed, 27 Mar 2019 17:06:36 +0800
Subject: [PATCH 88/95] drm/i915: Disable semaphore on vGPU for now

Disable semaphore usage when running on a vGPU for now. Unfortunately
GVT-g hasn't fully enabled semaphore support yet, so a current guest
using semaphores would cause vGPU failures.

Although the current semaphore failure with vGPU could simply be
resolved by allowing the cmd parser to accept the MI_SEMAPHORE_WAIT
command with address audit, we are still reviewing general semaphore
usage and how to handle it properly for virtualization, with both
functionality and security in mind. So we have decided to disable it
in the guest driver for now. Once GVT-g supports it, we will add a new
compat bit to turn it back on.
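
A hedged sketch of that future direction; intel_vgpu_has_semaphores()
is a hypothetical capability check standing in for the not yet defined
compat bit:

  /* hypothetical shape once GVT-g advertises semaphore support */
  if (!intel_vgpu_active(engine->i915) ||
      intel_vgpu_has_semaphores(engine->i915))  /* hypothetical */
          engine->flags |= I915_ENGINE_HAS_SEMAPHORES;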

Fixes: e88619646971 ("drm/i915: Use HW semaphores for inter-engine synchronisation on gen8+") #vgpu
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190327090636.3547-1-zhenyuw@linux.intel.com
---
 drivers/gpu/drm/i915/intel_lrc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3870215061997..bec232acc8d7b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2313,8 +2313,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->park = NULL;
 	engine->unpark = NULL;
 
-	engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+	if (!intel_vgpu_active(engine->i915))
+		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
 	if (engine->preempt_context)
 		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 }

From 96fd2c6633b0484b030eb15e646ad50426c41e6a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 27 Mar 2019 10:58:14 +0000
Subject: [PATCH 89/95] drm/i915: Drop new chunks of context creation ABI (for
 now)

The intent was to expose these as part of the means to perform full
context recovery (though not SINGLE_TIMELINE, which is for later and
was merely dragged along as collateral damage). As that requires a
couple more patches to complete the series, roll back the earlier
chunks of ABI for an intervening PR. We keep all the internals intact
and under selftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190327105814.14694-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c         |  2 --
 drivers/gpu/drm/i915/i915_gem_context.c |  9 +++++++++
 include/uapi/drm/i915_drm.h             | 18 +-----------------
 3 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ca8a8891b7b3e..a9fcb32f4989c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -3122,8 +3122,6 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 25f267a03d3d8..662da485e15fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -94,6 +94,9 @@
 #include "intel_lrc_reg.h"
 #include "intel_workarounds.h"
 
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
+#define I915_CONTEXT_PARAM_VM 0x9
+
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
 static struct i915_global_gem_context {
@@ -412,6 +415,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+	BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
+		     ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
 	    !HAS_EXECLISTS(dev_priv))
 		return ERR_PTR(-EINVAL);
@@ -971,6 +976,8 @@ static int get_ppgtt(struct i915_gem_context *ctx,
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
+	return -EINVAL; /* nothing to see here; please move along */
+
 	if (!ctx->ppgtt)
 		return -ENODEV;
 
@@ -1071,6 +1078,8 @@ static int set_ppgtt(struct i915_gem_context *ctx,
 	struct i915_hw_ppgtt *ppgtt, *old;
 	int err;
 
+	return -EINVAL; /* nothing to see here; please move along */
+
 	if (args->size)
 		return -EINVAL;
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 9999f7d6a5a9b..52051d24d89df 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -343,8 +343,6 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
 #define DRM_I915_QUERY			0x39
-#define DRM_I915_GEM_VM_CREATE		0x3a
-#define DRM_I915_GEM_VM_DESTROY		0x3b
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -405,8 +403,6 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
 #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
-#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
-#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1456,9 +1452,8 @@ struct drm_i915_gem_context_create_ext {
 	__u32 ctx_id; /* output: id of new context*/
 	__u32 flags;
 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
-#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
-	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+	(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
 	__u64 extensions;
 };
 
@@ -1500,17 +1495,6 @@ struct drm_i915_gem_context_param {
  * On creation, all new contexts are marked as recoverable.
  */
 #define I915_CONTEXT_PARAM_RECOVERABLE	0x8
-
-	/*
-	 * The id of the associated virtual memory address space (ppGTT) of
-	 * this context. Can be retrieved and passed to another context
-	 * (on the same fd) for both to use the same ppGTT and so share
-	 * address layouts, and avoid reloading the page tables on context
-	 * switches between themselves.
-	 *
-	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
-	 */
-#define I915_CONTEXT_PARAM_VM		0x9
 /* Must be kept compact -- no holes and well documented */
 
 	__u64 value;

From 3bd02fa4be3aba778cd5aa0ca9e072244ce45a74 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Fri, 22 Mar 2019 14:59:52 +0100
Subject: [PATCH 90/95] drm/i915: Handle YUV subpixel support better
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The Y41x formats are 4:4:4, so they can be addressed with pixel-level
accuracy. Meanwhile it seems that, when rotating, the YUYV 4:2:2
formats need a multiple of 2 for width and height, otherwise
corruption occurs.

For YUV 4:2:2 the spec says that w/h should always be even, but we get
away with an odd height while unrotated. When rotating, corruption
seems to occur with an odd x/y, and w/h should always be even. Just to
be completely paranoid, reject odd x/y and w/h when rotating 90/270.
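
A worked example of the rule above, using the standard drm_format_info
subsampling factors (illustrative only):

  /* YUYV (4:2:2): hsub = 2, vsub = 1
   *   unrotated:      only src_x/src_w must be even
   *   rotated 90/270: hsub = vsub = max(2, 1) = 2, so x/y/w/h all even
   * NV12 (4:2:0): hsub = vsub = 2, so x/y/w/h must be even either way
   */
  unsigned int hsub = fb->format->hsub;
  unsigned int vsub = fb->format->vsub;

  if (drm_rotation_90_or_270(plane_state->base.rotation))
          hsub = vsub = max(hsub, vsub);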

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322135954.20434-1-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_sprite.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 3f2055f70d051..aca987356194b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -269,7 +269,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_rect *src = &plane_state->base.src;
-	u32 src_x, src_y, src_w, src_h;
+	u32 src_x, src_y, src_w, src_h, hsub, vsub;
+	bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
 
 	/*
 	 * Hardware doesn't handle subpixel coordinates.
@@ -287,18 +288,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	src->y1 = src_y << 16;
 	src->y2 = (src_y + src_h) << 16;
 
-	if (fb->format->is_yuv &&
-	    (src_x & 1 || src_w & 1)) {
-		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
-			      src_x, src_w);
+	if (!fb->format->is_yuv)
+		return 0;
+
+	/* YUV specific checks */
+	if (!rotated) {
+		hsub = fb->format->hsub;
+		vsub = fb->format->vsub;
+	} else {
+		hsub = vsub = max(fb->format->hsub, fb->format->vsub);
+	}
+
+	if (src_x % hsub || src_w % hsub) {
+		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
+			      src_x, src_w, hsub, rotated ? "rotated " : "");
 		return -EINVAL;
 	}
 
-	if (fb->format->is_yuv &&
-	    fb->format->num_planes > 1 &&
-	    (src_y & 1 || src_h & 1)) {
-		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
-			      src_y, src_h);
+	if (src_y % vsub || src_h % vsub) {
+		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
+			      src_y, src_h, vsub, rotated ? "rotated " : "");
 		return -EINVAL;
 	}
 

From 546f0f6550676db1b5c74ecdabc65bb99de3e0f6 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Fri, 22 Mar 2019 14:59:53 +0100
Subject: [PATCH 91/95] drm/i915: Reject Yf tiling for HDR formats, v2.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This was missing in the original addition of those formats, but the
PLANE_SIZE description mentions that 8 cpp formats are not valid with
Yf tiling. Reject this case properly.

Also reject Yf tiling for the Y21x formats, as that is not supported
either.
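
The 8 cpp rule could also be stated directly in terms of bytes per
pixel; a hedged restatement, illustrative only (the Y21x formats are
rejected separately, since their cpp is 4 rather than 8):

  /* FP16 and the 12/16 bit XVYU formats all have cpp == 8 */
  if (fb->format->cpp[0] == 8 && modifier == I915_FORMAT_MOD_Yf_TILED)
          return false;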

Changes since v1:
- Reject Y21x as well.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322135954.20434-2-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_sprite.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index aca987356194b..36ee39ec36875 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -2107,12 +2107,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_P010:
 	case DRM_FORMAT_P012:
 	case DRM_FORMAT_P016:
-	case DRM_FORMAT_Y210:
-	case DRM_FORMAT_Y212:
-	case DRM_FORMAT_Y216:
 	case DRM_FORMAT_XVYU2101010:
-	case DRM_FORMAT_XVYU12_16161616:
-	case DRM_FORMAT_XVYU16161616:
 		if (modifier == I915_FORMAT_MOD_Yf_TILED)
 			return true;
 		/* fall through */
@@ -2121,6 +2116,11 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_ABGR16161616F:
 	case DRM_FORMAT_XRGB16161616F:
 	case DRM_FORMAT_ARGB16161616F:
+	case DRM_FORMAT_Y210:
+	case DRM_FORMAT_Y212:
+	case DRM_FORMAT_Y216:
+	case DRM_FORMAT_XVYU12_16161616:
+	case DRM_FORMAT_XVYU16161616:
 		if (modifier == DRM_FORMAT_MOD_LINEAR ||
 		    modifier == I915_FORMAT_MOD_X_TILED ||
 		    modifier == I915_FORMAT_MOD_Y_TILED)

From e9a88dc2876616cc4d1ffb28ec95f2df650fe3d1 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Fri, 22 Mar 2019 14:59:54 +0100
Subject: [PATCH 92/95] drm/i915: Reject rotation for some hdr formats
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

90/270 rotation is not supported for the Y21x and the 12/16 bit XVYU
formats, so reject them.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322135954.20434-3-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_sprite.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 36ee39ec36875..65de7387bf1b0 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1531,6 +1531,11 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 		case DRM_FORMAT_XBGR16161616F:
 		case DRM_FORMAT_ARGB16161616F:
 		case DRM_FORMAT_ABGR16161616F:
+		case DRM_FORMAT_Y210:
+		case DRM_FORMAT_Y212:
+		case DRM_FORMAT_Y216:
+		case DRM_FORMAT_XVYU12_16161616:
+		case DRM_FORMAT_XVYU16161616:
 			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
 				      drm_get_format_name(fb->format->format,
 							  &format_name));

From a24dac922fdace8d9109b4225abfd3b04516de7a Mon Sep 17 00:00:00 2001
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Date: Thu, 28 Mar 2019 13:15:45 +0200
Subject: [PATCH 93/95] drm/i915: Update DRIVER_DATE to 20190328

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b05687ed91ef7..056c00a23a449 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -92,8 +92,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20190320"
-#define DRIVER_TIMESTAMP	1553069028
+#define DRIVER_DATE		"20190328"
+#define DRIVER_TIMESTAMP	1553771744
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions

From 4828d64e7f38c68cdfdc62fc75bfebcdb2eacd16 Mon Sep 17 00:00:00 2001
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Date: Thu, 28 Mar 2019 13:53:18 +0200
Subject: [PATCH 94/95] drm/i915: Update DRIVER_DATE to 20190328

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 056c00a23a449..f7d5b47e1dcb2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -93,7 +93,7 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
 #define DRIVER_DATE		"20190328"
-#define DRIVER_TIMESTAMP	1553771744
+#define DRIVER_TIMESTAMP	1553773998
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions

From a01b2c6f47d86c7d1a9fa822b3b91ec233b61784 Mon Sep 17 00:00:00 2001
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Date: Thu, 28 Mar 2019 14:41:55 +0200
Subject: [PATCH 95/95] drm/i915: Update DRIVER_DATE to 20190328

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f7d5b47e1dcb2..25c264e55d3c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -93,7 +93,7 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
 #define DRIVER_DATE		"20190328"
-#define DRIVER_TIMESTAMP	1553773998
+#define DRIVER_TIMESTAMP	1553776914
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions