diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
new file mode 100644
index 0000000000000..d101233ae17ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ti,sn65dsi83.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SN65DSI83 and SN65DSI84 DSI to LVDS bridge chip
+
+maintainers:
+  - Marek Vasut <marex@denx.de>
+
+description: |
+  Texas Instruments SN65DSI83 1x Single-link MIPI DSI to 1x Single-link LVDS
+  https://www.ti.com/lit/gpn/sn65dsi83
+  Texas Instruments SN65DSI84 1x Single-link MIPI DSI to 1x Dual-link or
+  2x Single-link LVDS
+  https://www.ti.com/lit/gpn/sn65dsi84
+
+properties:
+  compatible:
+    enum:
+      - ti,sn65dsi83
+      - ti,sn65dsi84
+
+  reg:
+    enum:
+      - 0x2c
+      - 0x2d
+
+  enable-gpios:
+    maxItems: 1
+    description: GPIO specifier for bridge_en pin (active high).
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for MIPI DSI Channel-A input
+
+        properties:
+          endpoint:
+            $ref: /schemas/media/video-interfaces.yaml#
+            unevaluatedProperties: false
+
+            properties:
+              data-lanes:
+                description: array of physical DSI data lane indexes.
+                minItems: 1
+                maxItems: 4
+                items:
+                  - const: 1
+                  - const: 2
+                  - const: 3
+                  - const: 4
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for MIPI DSI Channel-B input
+
+        properties:
+          endpoint:
+            $ref: /schemas/media/video-interfaces.yaml#
+            unevaluatedProperties: false
+
+            properties:
+              data-lanes:
+                description: array of physical DSI data lane indexes.
+                minItems: 1
+                maxItems: 4
+                items:
+                  - const: 1
+                  - const: 2
+                  - const: 3
+                  - const: 4
+
+      port@2:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for LVDS Channel-A output (panel or bridge).
+
+      port@3:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Video port for LVDS Channel-B output (panel or bridge).
+
+    required:
+      - port@0
+      - port@2
+
+required:
+  - compatible
+  - reg
+  - enable-gpios
+  - ports
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,sn65dsi83
+    then:
+      properties:
+        ports:
+          properties:
+            port@1: false
+            port@3: false
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,sn65dsi84
+    then:
+      properties:
+        ports:
+          properties:
+            port@1: false
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        bridge@2d {
+            compatible = "ti,sn65dsi83";
+            reg = <0x2d>;
+
+            enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+
+                    endpoint {
+                        remote-endpoint = <&dsi0_out>;
+                        data-lanes = <1 2 3 4>;
+                    };
+                };
+
+                port@2 {
+                    reg = <2>;
+
+                    endpoint {
+                        remote-endpoint = <&panel_in_lvds>;
+                    };
+                };
+            };
+        };
+    };
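+  - |
+    // A second, hedged example: SN65DSI84 wired for dual-link LVDS output.
+    // The endpoint labels (dsi0_out_dual, panel_in_lvds0, panel_in_lvds1)
+    // and the GPIO line are illustrative assumptions, not from a real board.
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        bridge@2c {
+            compatible = "ti,sn65dsi84";
+            reg = <0x2c>;
+
+            enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+
+                    endpoint {
+                        remote-endpoint = <&dsi0_out_dual>;
+                        data-lanes = <1 2 3 4>;
+                    };
+                };
+
+                port@2 {
+                    reg = <2>;
+
+                    endpoint {
+                        remote-endpoint = <&panel_in_lvds0>;
+                    };
+                };
+
+                port@3 {
+                    reg = <3>;
+
+                    endpoint {
+                        remote-endpoint = <&panel_in_lvds1>;
+                    };
+                };
+            };
+        };
+    };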
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 7f37ec30d9fd7..7f21425d9435a 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -178,6 +178,15 @@ DMA Fence Array
 .. kernel-doc:: include/linux/dma-fence-array.h
    :internal:
 
+DMA Fence Chain
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence-chain.c
+   :export:
+
+.. kernel-doc:: include/linux/dma-fence-chain.h
+   :internal:
+
 DMA Fence uABI/Sync File
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 21be6deadc120..d5a73fa2c9ef4 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -469,8 +469,8 @@ DRM MM Range Allocator Function References
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :export:
 
-DRM Cache Handling
-==================
+DRM Cache Handling and Fast WC memcpy()
+=======================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_cache.c
    :export:
diff --git a/MAINTAINERS b/MAINTAINERS
index 49091fbfa22b4..bc336961ed527 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6084,6 +6084,14 @@ T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/display/hisilicon/
 F:	drivers/gpu/drm/hisilicon/
 
+DRM DRIVER FOR HYPERV SYNTHETIC VIDEO DEVICE
+M:	Deepak Rawat <drawat.floss@gmail.com>
+L:	linux-hyperv@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+F:	drivers/gpu/drm/hyperv/
+
 DRM DRIVERS FOR LIMA
 M:	Qiang Yu <yuq825@gmail.com>
 L:	dri-devel@lists.freedesktop.org
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index eadd1eaa2fb54..511fe0d217a08 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = rcu_dereference(resv->fence_excl);
+	fence_excl = dma_resv_excl_fence(resv);
 	if (read_seqcount_retry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout_rcu(resv, write, true,
-						  MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
@@ -1349,15 +1348,14 @@ EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
-	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
 	struct dma_resv *robj;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
-	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
+	int ret;
 
 	ret = mutex_lock_interruptible(&db_list.lock);
 
@@ -1383,33 +1381,24 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		while (true) {
-			seq = read_seqcount_begin(&robj->seq);
-			rcu_read_lock();
-			fobj = rcu_dereference(robj->fence);
-			shared_count = fobj ? fobj->shared_count : 0;
-			fence = rcu_dereference(robj->fence_excl);
-			if (!read_seqcount_retry(&robj->seq, seq))
-				break;
-			rcu_read_unlock();
-		}
-
+		fence = dma_resv_excl_fence(robj);
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
+
+		fobj = rcu_dereference_protected(robj->fence,
+						 dma_resv_held(robj));
+		shared_count = fobj ? fobj->shared_count : 0;
 		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
-			if (!dma_fence_get_rcu(fence))
-				continue;
+			fence = rcu_dereference_protected(fobj->shared[i],
+							  dma_resv_held(robj));
 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
-			dma_fence_put(fence);
 		}
-		rcu_read_unlock();
 
 		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6ddbeb5dfbf65..f26c71747d43a 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-	struct mm_struct *mm = mm_alloc();
-	struct ww_acquire_ctx ctx;
-	struct dma_resv obj;
-	struct address_space mapping;
-	int ret;
-
-	if (!mm)
-		return -ENOMEM;
-
-	dma_resv_init(&obj);
-	address_space_init_once(&mapping);
-
-	mmap_read_lock(mm);
-	ww_acquire_init(&ctx, &reservation_ww_class);
-	ret = dma_resv_lock(&obj, &ctx);
-	if (ret == -EDEADLK)
-		dma_resv_lock_slow(&obj, &ctx);
-	fs_reclaim_acquire(GFP_KERNEL);
-	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-	i_mmap_lock_write(&mapping);
-	i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-	__dma_fence_might_wait();
-	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-	__dma_fence_might_wait();
-#endif
-	fs_reclaim_release(GFP_KERNEL);
-	ww_mutex_unlock(&obj.lock);
-	ww_acquire_fini(&ctx);
-	mmap_read_unlock(mm);
-	
-	mmput(mm);
-
-	return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -191,14 +149,11 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
-
+	old = dma_resv_shared_list(obj);
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		else
-			max = max(old->shared_count + num_fences,
-				  old->shared_max * 2);
+		max = max(old->shared_count + num_fences, old->shared_max * 2);
 	} else {
 		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
@@ -252,6 +207,28 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 }
 EXPORT_SYMBOL(dma_resv_reserve_shared);
 
+#ifdef CONFIG_DEBUG_MUTEXES
+/**
+ * dma_resv_reset_shared_max - reset shared fences for debugging
+ * @obj: the dma_resv object to reset
+ *
+ * Reset the number of pre-reserved shared slots to check that drivers
+ * allocate slots correctly using dma_resv_reserve_shared(). See also
+ * &dma_resv_list.shared_max.
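+ *
+ * A minimal sketch of the debug pattern this enables (illustrative only,
+ * error handling elided)::
+ *
+ *	dma_resv_lock(obj, NULL);
+ *	dma_resv_reserve_shared(obj, 1);
+ *	dma_resv_add_shared_fence(obj, fence);
+ *	dma_resv_reset_shared_max(obj);
+ *	dma_resv_unlock(obj);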
+ */
+void dma_resv_reset_shared_max(struct dma_resv *obj)
+{
+	struct dma_resv_list *fences = dma_resv_shared_list(obj);
+
+	dma_resv_assert_held(obj);
+
+	/* Test shared fence slot reservation */
+	if (fences)
+		fences->shared_max = fences->shared_count;
+}
+EXPORT_SYMBOL(dma_resv_reset_shared_max);
+#endif
+
 /**
  * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
@@ -270,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	fobj = dma_resv_get_list(obj);
+	fobj = dma_resv_shared_list(obj);
 	count = fobj->shared_count;
 
 	write_seqcount_begin(&obj->seq);
@@ -307,13 +284,13 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
 	struct dma_resv_list *old;
 	u32 i = 0;
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
+	old = dma_resv_shared_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -337,26 +314,26 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* dma_resv_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned i;
+	unsigned int i;
 
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
-	src_list = rcu_dereference(src->fence);
+	src_list = dma_resv_shared_list(src);
 
 retry:
 	if (src_list) {
-		unsigned shared_count = src_list->shared_count;
+		unsigned int shared_count = src_list->shared_count;
 
 		rcu_read_unlock();
 
@@ -365,7 +342,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		src_list = rcu_dereference(src->fence);
+		src_list = dma_resv_shared_list(src);
 		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
@@ -373,6 +350,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 		dst_list->shared_count = 0;
 		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence __rcu **dst;
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -382,7 +360,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
-				src_list = rcu_dereference(src->fence);
+				src_list = dma_resv_shared_list(src);
 				goto retry;
 			}
 
@@ -391,7 +369,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 				continue;
 			}
 
-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+			dst = &dst_list->shared[dst_list->shared_count++];
+			rcu_assign_pointer(*dst, fence);
 		}
 	} else {
 		dst_list = NULL;
@@ -400,8 +379,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	src_list = dma_resv_get_list(dst);
-	old = dma_resv_get_excl(dst);
+	src_list = dma_resv_shared_list(dst);
+	old = dma_resv_excl_fence(dst);
 
 	write_seqcount_begin(&dst->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
@@ -417,7 +396,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update-side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -429,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
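+ *
+ * A minimal caller sketch, assuming the usual cleanup contract that the
+ * caller puts every returned fence and frees the array (names are
+ * illustrative)::
+ *
+ *	r = dma_resv_get_fences(obj, &excl, &count, &shared);
+ *	if (!r) {
+ *		while (count--)
+ *			dma_fence_put(shared[count]);
+ *		kfree(shared);
+ *		dma_fence_put(excl);
+ *	}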
  */
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned int *pshared_count,
+			struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -449,11 +427,11 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 		rcu_read_lock();
 		seq = read_seqcount_begin(&obj->seq);
 
-		fence_excl = rcu_dereference(obj->fence_excl);
+		fence_excl = dma_resv_excl_fence(obj);
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
-		fobj = rcu_dereference(obj->fence);
+		fobj = dma_resv_shared_list(obj);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -515,27 +493,28 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 
 /**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on a reservation object's
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
  * @intr: if true, do interruptible wait
  * @timeout: timeout value in jiffies or zero to return immediately
  *
+ * Callers are not required to hold specific locks, but may be holding
+ * dma_resv_lock() already.
+ *
  * RETURNS
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
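+ *
+ * A minimal caller sketch (illustrative): wait interruptibly on all fences
+ * with a 100 ms cap::
+ *
+ *	long ret = dma_resv_wait_timeout(obj, true, true,
+ *					 msecs_to_jiffies(100));
+ *	if (ret == 0)
+ *		return -ETIMEDOUT;
+ *	if (ret < 0)
+ *		return ret;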
  */
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
-			       bool wait_all, bool intr,
-			       unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout)
 {
-	struct dma_fence *fence;
-	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
+	unsigned int seq, shared_count;
+	struct dma_fence *fence;
 	int i;
 
 retry:
@@ -544,7 +523,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_lock();
 	i = -1;
 
-	fence = rcu_dereference(obj->fence_excl);
+	fence = dma_resv_excl_fence(obj);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -559,14 +538,15 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	}
 
 	if (wait_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence;
 
+			lfence = rcu_dereference(fobj->shared[i]);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 				     &lfence->flags))
 				continue;
@@ -602,7 +582,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 
 
 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -622,18 +602,20 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 }
 
 /**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
  * fence
  *
+ * Callers are not required to hold specific locks, but may be holding
+ * dma_resv_lock() already.
+ *
  * RETURNS
  * true if all fences signaled, else false
  */
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
-	unsigned seq, shared_count;
+	unsigned int seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
@@ -643,16 +625,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		unsigned i;
-
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
+		unsigned int i;
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;
 
+			fence = rcu_dereference(fobj->shared[i]);
 			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
@@ -665,7 +647,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	}
 
 	if (!shared_count) {
-		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+		struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
 
 		if (fence_excl) {
 			ret = dma_resv_test_signaled_single(fence_excl);
@@ -680,4 +662,47 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+	struct mm_struct *mm = mm_alloc();
+	struct ww_acquire_ctx ctx;
+	struct dma_resv obj;
+	struct address_space mapping;
+	int ret;
+
+	if (!mm)
+		return -ENOMEM;
+
+	dma_resv_init(&obj);
+	address_space_init_once(&mapping);
+
+	mmap_read_lock(mm);
+	ww_acquire_init(&ctx, &reservation_ww_class);
+	ret = dma_resv_lock(&obj, &ctx);
+	if (ret == -EDEADLK)
+		dma_resv_lock_slow(&obj, &ctx);
+	fs_reclaim_acquire(GFP_KERNEL);
+	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+	i_mmap_lock_write(&mapping);
+	i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+	__dma_fence_might_wait();
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+	__dma_fence_might_wait();
+#endif
+	fs_reclaim_release(GFP_KERNEL);
+	ww_mutex_unlock(&obj.lock);
+	ww_acquire_fini(&ctx);
+	mmap_read_unlock(mm);
+
+	mmput(mm);
+
+	return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 594ac6c3a1f4b..7ff89690a976a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -98,8 +98,8 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
 	depends on DRM
+	depends on FB
 	select DRM_KMS_HELPER
-	select FB
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -379,6 +379,19 @@ source "drivers/gpu/drm/xlnx/Kconfig"
 
 source "drivers/gpu/drm/gud/Kconfig"
 
+config DRM_HYPERV
+	tristate "DRM Support for Hyper-V synthetic video device"
+	depends on DRM && PCI && MMU && HYPERV
+	select DRM_KMS_HELPER
+	select DRM_GEM_SHMEM_HELPER
+	help
+	  This is a KMS driver for the Hyper-V synthetic video device. Choose
+	  this option if you would like to enable the DRM driver for Hyper-V
+	  virtual machines. Unselect the Hyper-V framebuffer driver
+	  (CONFIG_FB_HYPERV) so that the DRM driver is used by default.
+
+	  If M is selected the module will be called hyperv_drm.
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a91cc76849046..a118692a6df77 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -126,3 +126,4 @@ obj-$(CONFIG_DRM_MCDE) += mcde/
 obj-$(CONFIG_DRM_TIDSS) += tidss/
 obj-y			+= xlnx/
 obj-y			+= gud/
+obj-$(CONFIG_DRM_HYPERV) += hyperv/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 141cd297e948f..668a28b80a62b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_get_list(resv);
+	old = dma_resv_shared_list(resv);
 	if (!old)
 		return 0;
 
@@ -1668,7 +1668,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * the next restore worker
 	 */
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
-	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
 	ret = vm_validate_pt_pd_bos(avm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0585442b000ee..736b8c1219793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4127,9 +4127,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
 
 		/* No need to recover an evicted BO */
-		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
-		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
-		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 			continue;
 
 		r = amdgpu_bo_restore_shadow(shadow, &next);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 49f73b5b89b00..ac7b37dfff5e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
-					      &work->shared_count,
-					      &work->shared);
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+				&work->shared_count, &work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 6ec1312b73898..c3053b83b80cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -49,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	unsigned int count;
 	int r;
 
-	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		if (r)
 			return ERR_PTR(r);
 
-	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
 		     AMDGPU_GEM_DOMAIN_GTT)) {
 		return ERR_PTR(-EBUSY);
 	}
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		break;
 
 	case TTM_PL_VRAM:
-		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
-				bo->tbo.base.size, attach->dev, dir, &sgt);
+		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+					      bo->tbo.base.size, attach->dev,
+					      dir, &sgt);
 		if (r)
 			return ERR_PTR(r);
 		break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	struct amdgpu_vm_bo_base *bo_base;
 	int r;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		return;
 
 	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 73c76a3e2b12a..1c3e3b608332f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (!amdgpu_vm_ready(vm))
 		goto out_unlock;
 
-	fence = dma_resv_get_excl(bo->tbo.base.resv);
+	fence = dma_resv_excl_fence(bo->tbo.base.resv);
 	if (fence) {
 		amdgpu_bo_fence(bo, fence, true);
 		fence = NULL;
@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
-						  timeout);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
 
 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 7061c4a0e421d..85b8bfb8857b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		*addr = bo->tbo.ttm->dma_address[0];
 		break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 		*addr = 0;
 		break;
 	}
-	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
 	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 9ab33048923e7..ec96e0b26b116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,18 +22,26 @@
  * Authors: Christian König
  */
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 
+struct amdgpu_gtt_node {
+	struct ttm_buffer_object *tbo;
+	struct ttm_range_mgr_node base;
+};
+
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
 	return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-struct amdgpu_gtt_node {
-	struct drm_mm_node node;
-	struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+	return container_of(res, struct amdgpu_gtt_node, base.base);
+}
 
 /**
  * DOC: mem_info_gtt_total
@@ -93,13 +101,15 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
 /**
  * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
- * @mem: the mem object to check
+ * @res: the mem object to check
  *
 * Check if a mem object already has address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	return mem->mm_node != NULL;
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+
+	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
 
 /**
@@ -115,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	if (!place->lpfn) {
-		mem->mm_node = NULL;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
-		return 0;
-	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
 	node->tbo = tbo;
+	ttm_resource_init(tbo, place, &node->base.base);
 
-	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-					tbo->page_alignment, 0, place->fpfn,
-					place->lpfn, DRM_MM_INSERT_BEST);
-	spin_unlock(&mgr->lock);
-
-	if (unlikely(r))
-		goto err_free;
-
-	mem->mm_node = node;
-	mem->start = node->node.start;
+	if (place->lpfn) {
+		spin_lock(&mgr->lock);
+		r = drm_mm_insert_node_in_range(&mgr->mm,
+						&node->base.mm_nodes[0],
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
+						DRM_MM_INSERT_BEST);
+		spin_unlock(&mgr->lock);
+		if (unlikely(r))
+			goto err_free;
+
+		node->base.base.start = node->base.mm_nodes[0].start;
+	} else {
+		node->base.mm_nodes[0].start = 0;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+	}
 
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -176,19 +189,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	if (node) {
-		spin_lock(&mgr->lock);
-		drm_mm_remove_node(&node->node);
-		spin_unlock(&mgr->lock);
-		kfree(node);
-	}
+	spin_lock(&mgr->lock);
+	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+		drm_mm_remove_node(&node->base.mm_nodes[0]);
+	spin_unlock(&mgr->lock);
+	atomic64_add(res->num_pages, &mgr->available);
 
-	atomic64_add(mem->num_pages, &mgr->available);
+	kfree(node);
 }
 
 /**
@@ -224,7 +236,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
 		r = amdgpu_ttm_recover_gart(node->tbo);
 		if (r)
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index c7f3aae23c625..b7fb72bff2c13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,8 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout_rcu(resv, true, false,
-					    MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b56..d6c54c7f76799 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a6fa3968463ae..c83f116edde22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -364,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 	}
 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     &(*bo_ptr)->tbo.mem, &ctx);
+			     &(*bo_ptr)->tbo.resource, &ctx);
 	if (r)
 		goto error;
 
@@ -575,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -777,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
-						MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r)
 		return r;
 
@@ -905,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->tbo.pin_count) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
-		uint32_t mem_flags = bo->tbo.mem.placement;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
+		uint32_t mem_flags = bo->tbo.resource->placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
@@ -956,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -1008,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	if (bo->tbo.base.import_attach)
 		dma_buf_unpin(bo->tbo.base.import_attach);
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
 			     &adev->visible_pin_size);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
 	}
 }
@@ -1245,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 
 	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
@@ -1256,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	amdgpu_bo_kunmap(abo);
 
 	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-	    bo->mem.mem_type != TTM_PL_SYSTEM)
+	    bo->resource->mem_type != TTM_PL_SYSTEM)
 		dma_buf_move_notify(abo->tbo.base.dma_buf);
 
 	/* remember the eviction */
@@ -1276,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
 	unsigned int domain;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		*vram_mem += amdgpu_bo_size(bo);
@@ -1318,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
@@ -1355,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
 		return 0;
 
@@ -1381,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	else if (unlikely(r))
 		return VM_FAULT_SIGBUS;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	/* this should never happen */
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	if (bo->resource->mem_type == TTM_PL_VRAM &&
 	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
 		return VM_FAULT_SIGBUS;
 
@@ -1468,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1490,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	uint64_t offset;
 
-	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
 	return amdgpu_gmc_sign_extend(offset);
 }
@@ -1544,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	unsigned int pin_count;
 	u64 size;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 90eab1c310271..126df03a70666 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -223,10 +223,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_res_cursor cursor;
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
 		return false;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 	while (cursor.remaining) {
 		if (cursor.start < adev->gmc.visible_vram_size)
 			return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d607f314cc1b5..f6aff7ce51606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -66,14 +66,18 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
 static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *tbo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 
-	atomic64_add(mem->num_pages, &mgr->used);
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
+		return -ENOMEM;
 
-	mem->mm_node = NULL;
-	mem->start = AMDGPU_BO_INVALID_OFFSET;
+	ttm_resource_init(tbo, place, *res);
+	(*res)->start = AMDGPU_BO_INVALID_OFFSET;
+
+	atomic64_add((*res)->num_pages, &mgr->used);
 	return 0;
 }
 
@@ -86,11 +90,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated GTT again.
  */
 static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 
-	atomic64_sub(mem->num_pages, &mgr->used);
+	atomic64_sub(res->num_pages, &mgr->used);
+	kfree(res);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 40f2adf305bc2..59e0fefb15aa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
 
 #include <drm/drm_mm.h>
 #include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2b..1b2ceccaf5b0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_excl_fence(resv);
 	r = amdgpu_sync_fence(sync, f);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (!flist || r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 792d202618465..0527772fe1b80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.mem.num_pages;
-			   __entry->type = bo->tbo.mem.mem_type;
+			   __entry->pages = bo->tbo.resource->num_pages;
+			   __entry->type = bo->tbo.resource->mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
 			   __entry->visible = bo->flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 832970cff64c0..5d0c57b9917f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -45,6 +45,7 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 #include <drm/amdgpu_drm.h>
 
@@ -125,7 +126,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		rcu_read_unlock();
 		return;
 	}
-	switch (bo->mem.mem_type) {
+
+	switch (bo->resource->mem_type) {
 	case AMDGPU_PL_GDS:
 	case AMDGPU_PL_GWS:
 	case AMDGPU_PL_OA:
@@ -460,7 +462,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	int r;
 
 	if (new_mem->mem_type == TTM_PL_TT ||
@@ -495,7 +497,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			return r;
 
 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
@@ -605,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_res_cursor cursor;
 
-	amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+			 &cursor);
 	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 }
 
@@ -954,50 +957,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	struct ttm_resource tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
+	struct ttm_resource *tmp;
 	uint64_t addr, flags;
 	int r;
 
-	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	addr = amdgpu_gmc_agp_addr(bo);
 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
-		bo->mem.start = addr >> PAGE_SHIFT;
-	} else {
+		bo->resource->start = addr >> PAGE_SHIFT;
+		return 0;
+	}
 
-		/* allocate GART space */
-		placement.num_placement = 1;
-		placement.placement = &placements;
-		placement.num_busy_placement = 1;
-		placement.busy_placement = &placements;
-		placements.fpfn = 0;
-		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-		placements.mem_type = TTM_PL_TT;
-		placements.flags = bo->mem.placement;
-
-		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
-		if (unlikely(r))
-			return r;
+	/* allocate GART space */
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->resource->placement;
 
-		/* compute PTE flags for this buffer object */
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+	if (unlikely(r))
+		return r;
 
-		/* Bind pages */
-		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
-		r = amdgpu_ttm_gart_bind(adev, bo, flags);
-		if (unlikely(r)) {
-			ttm_resource_free(bo, &tmp);
-			return r;
-		}
+	/* compute PTE flags for this buffer object */
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
 
-		amdgpu_gart_invalidate_tlb(adev);
-		ttm_resource_free(bo, &bo->mem);
-		bo->mem = tmp;
+	/* Bind pages */
+	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+	r = amdgpu_ttm_gart_bind(adev, bo, flags);
+	if (unlikely(r)) {
+		ttm_resource_free(bo, &tmp);
+		return r;
 	}
 
+	amdgpu_gart_invalidate_tlb(adev);
+	ttm_resource_free(bo, &bo->resource);
+	ttm_bo_assign_mem(bo, tmp);
+
 	return 0;
 }
 
@@ -1016,7 +1019,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 	if (!tbo->ttm)
 		return 0;
 
-	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
 	r = amdgpu_ttm_gart_bind(adev, tbo, flags);
 
 	return r;
@@ -1330,12 +1333,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
-	unsigned long num_pages = bo->mem.num_pages;
+	unsigned long num_pages = bo->resource->num_pages;
 	struct amdgpu_res_cursor cursor;
 	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
+	/* Swapout? */
+	if (bo->resource->mem_type == TTM_PL_SYSTEM)
+		return true;
+
 	if (bo->type == ttm_bo_type_kernel &&
 	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 		return false;
@@ -1344,7 +1351,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = dma_resv_get_list(bo->base.resv);
+	flist = dma_resv_shared_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
@@ -1354,7 +1361,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 		}
 	}
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case AMDGPU_PL_PREEMPT:
 		/* Preemptible BOs don't own system resources managed by the
 		 * driver (pages, VRAM, GART space). They point to resources
@@ -1372,7 +1379,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
 	case TTM_PL_VRAM:
 		/* Check each drm MM node individually */
-		amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+		amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
 				 &cursor);
 		while (cursor.remaining) {
 			if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1414,10 +1421,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 	uint32_t value = 0;
 	int ret = 0;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return -EIO;
 
-	amdgpu_res_first(&bo->mem, offset, len, &cursor);
+	amdgpu_res_first(bo->resource, offset, len, &cursor);
 	while (cursor.remaining) {
 		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
 		uint64_t bytes = 4 - (cursor.start & 3);
@@ -1942,21 +1949,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		return -EINVAL;
 	}
 
-	if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
+	if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
 		DRM_ERROR("Trying to clear preemptible memory.\n");
 		return -EINVAL;
 	}
 
-	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}
 
-	num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+	num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
 	num_loops = 0;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
 		amdgpu_res_next(&cursor, cursor.size);
@@ -1981,12 +1988,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		}
 	}
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
 		uint64_t dst_addr = cursor.start;
 
-		dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+		dst_addr += amdgpu_ttm_domain_start(adev,
+						    bo->tbo.resource->mem_type);
 		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
 					cur_size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index ce8f80a959c60..0f576f294d8a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1125,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
-							true, false,
-							msecs_to_jiffies(10));
+		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
 		if (r < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8d218c5cfee84..777e8922ecf33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 		amdgpu_vm_bo_idle(base);
 
 	if (bo->preferred_domains &
-	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 		return;
 
 	/*
@@ -658,10 +658,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
 					&vm->lru_bulk_move);
 		if (shadow)
-			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
+			ttm_bo_move_to_lru_tail(&shadow->tbo,
+						shadow->tbo.resource,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1858,10 +1859,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			struct drm_gem_object *gobj = dma_buf->priv;
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-			if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
 				bo = gem_to_amdgpu_bo(gobj);
 		}
-		mem = &bo->tbo.mem;
+		mem = bo->tbo.resource;
 		if (mem->mem_type == TTM_PL_TT ||
 		    mem->mem_type == AMDGPU_PL_PREEMPT)
 			pages_addr = bo->tbo.ttm->dma_address;
@@ -1922,7 +1923,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * next command submission.
 	 */
 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
 		      amdgpu_mem_type_to_domain(mem_type)))
@@ -2063,13 +2064,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, &excl,
-					      &shared_count, &shared);
+	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
-						    MAX_SCHEDULE_TIMEOUT);
+		dma_resv_wait_timeout(resv, true, false,
+				      MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2681,7 +2681,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2861,8 +2861,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					    true, true, timeout);
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+					true, timeout);
 	if (timeout <= 0)
 		return timeout;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07e007dbff7c0..9a6df02477cea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,8 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_res_cursor.h"
@@ -217,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = &bo->tbo.mem;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
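+	/* VRAM resources are backed by an array of drm_mm nodes. */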
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
@@ -365,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
 	uint64_t vis_usage = 0, mem_bytes, max_bytes;
+	struct ttm_range_mgr_node *node;
 	struct drm_mm *mm = &mgr->mm;
 	enum drm_mm_insert_mode mode;
-	struct drm_mm_node *nodes;
 	unsigned i;
 	int r;
 
@@ -386,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
-		atomic64_sub(mem_bytes, &mgr->usage);
-		return -ENOSPC;
+		r = -ENOSPC;
+		goto error_sub;
 	}
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -404,22 +407,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
-	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
-			       GFP_KERNEL | __GFP_ZERO);
-	if (!nodes) {
-		atomic64_sub(mem_bytes, &mgr->usage);
-		return -ENOMEM;
+	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+			GFP_KERNEL | __GFP_ZERO);
+	if (!node) {
+		r = -ENOMEM;
+		goto error_sub;
 	}
 
+	ttm_resource_init(tbo, place, &node->base);
+
 	mode = DRM_MM_INSERT_BEST;
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		if (pages >= pages_per_node)
 			alignment = pages_per_node;
 
-		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
-						0, place->fpfn, lpfn, mode);
+		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+						alignment, 0, place->fpfn,
+						lpfn, mode);
 		if (unlikely(r)) {
 			if (pages > pages_per_node) {
 				if (is_power_of_2(pages))
@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 					pages = rounddown_pow_of_two(pages);
 				continue;
 			}
-			goto error;
+			goto error_free;
 		}
 
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
 
@@ -456,19 +461,20 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = nodes;
+	*res = &node->base;
 	return 0;
 
-error:
+error_free:
 	while (i--)
-		drm_mm_remove_node(&nodes[i]);
+		drm_mm_remove_node(&node->mm_nodes[i]);
 	spin_unlock(&mgr->lock);
-	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+	kvfree(node);
 
-	kvfree(nodes);
+error_sub:
+	atomic64_sub(mem_bytes, &mgr->usage);
 	return r;
 }
 
@@ -481,24 +487,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated VRAM again.
  */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-				struct ttm_resource *mem)
+				struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct drm_mm_node *nodes = mem->mm_node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-
-	if (!mem->mm_node)
-		return;
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
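+	/* Walk all drm_mm nodes backing this resource and release them. */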
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
@@ -506,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
-	kvfree(mem->mm_node);
-	mem->mm_node = NULL;
+	kvfree(node);
 }
 
 /**
@@ -524,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
  * Allocate and fill a sg table from a VRAM allocation.
  */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
 			      u64 offset, u64 length,
 			      struct device *dev,
 			      enum dma_data_direction dir,
@@ -540,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
@@ -560,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de28..2660f03e63a7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 #include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
 	return r;
 }
 
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
-			       struct drm_mm_node **mm_node, uint64_t *offset)
-{
-	struct drm_mm_node *node = *mm_node;
-	uint64_t pos = *offset;
-
-	if (node->start == AMDGPU_BO_INVALID_OFFSET) {
-		pr_debug("drm node is not validated\n");
-		return 0;
-	}
-
-	pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
-		 node->size);
-
-	if (pos >= node->size) {
-		do  {
-			pos -= node->size;
-			node++;
-		} while (pos >= node->size);
-
-		*mm_node = node;
-		*offset = pos;
-	}
-
-	return (node->start + pos) << PAGE_SHIFT;
-}
-
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
 {
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 {
 	uint64_t npages = migrate->cpages;
 	struct device *dev = adev->dev;
-	struct drm_mm_node *node;
+	struct amdgpu_res_cursor cursor;
 	dma_addr_t *src;
 	uint64_t *dst;
-	uint64_t vram_addr;
-	uint64_t offset;
 	uint64_t i, j;
 	int r;
 
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out;
 	}
 
-	node = prange->ttm_res->mm_node;
-	offset = prange->offset;
-	vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
-	if (!vram_addr) {
-		WARN_ONCE(1, "vram node address is 0\n");
-		r = -ENOMEM;
-		goto out;
-	}
-
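+	/* Walk the VRAM allocation with a cursor instead of raw drm_mm nodes. */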
+	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+			 npages << PAGE_SHIFT, &cursor);
 	for (i = j = 0; i < npages; i++) {
 		struct page *spage;
 
-		dst[i] = vram_addr + (j << PAGE_SHIFT);
+		dst[i] = cursor.start + (j << PAGE_SHIFT);
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 						mfence);
 				if (r)
 					goto out_free_vram_pages;
-				offset += j;
-				vram_addr = (node->start + offset) << PAGE_SHIFT;
+				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
 				j = 0;
 			} else {
-				offset++;
-				vram_addr += PAGE_SIZE;
-			}
-			if (offset >= node->size) {
-				node++;
-				pr_debug("next node size 0x%llx\n", node->size);
-				vram_addr = node->start << PAGE_SHIFT;
-				offset = 0;
+				amdgpu_res_next(&cursor, PAGE_SIZE);
 			}
 			continue;
 		}
@@ -373,19 +329,15 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
 			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
 
-		if (j + offset >= node->size - 1 && i < npages - 1) {
+		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
 							 dst + i - j, j + 1,
 							 FROM_RAM_TO_VRAM,
 							 mfence);
 			if (r)
 				goto out_free_vram_pages;
-
-			node++;
-			pr_debug("next node size 0x%llx\n", node->size);
-			vram_addr = node->start << PAGE_SHIFT;
-			offset = 0;
-			j = 0;
+			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
+			j = 0;
 		} else {
 			j++;
 		}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f2901e7a993ec..0d57ef9e73eb9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
 				 prange->svms, prange->start, prange->last);
 
-			prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
 			return true;
 		}
 
@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 
 	svm_bo->bo = bo;
 	prange->svm_bo = svm_bo;
-	prange->ttm_res = &bo->tbo.mem;
+	prange->ttm_res = bo->tbo.resource;
 	prange->offset = 0;
 
 	spin_lock(&svm_bo->list_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4f4c6438d2453..ae519f929c573 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8447,9 +8447,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
-							false,
-							msecs_to_jiffies(5000));
+		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");
 
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c96e4b38d1d34..85b673613687d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -285,6 +285,16 @@ config DRM_TI_TFP410
 	help
 	  Texas Instruments TFP410 DVI/HDMI Transmitter driver
 
+config DRM_TI_SN65DSI83
+	tristate "TI SN65DSI83 and SN65DSI84 DSI to LVDS bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	help
+	  Texas Instruments SN65DSI83 and SN65DSI84 DSI to LVDS Bridge driver
+
 config DRM_TI_SN65DSI86
 	tristate "TI SN65DSI86 DSI to eDP bridge"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index dff2b28975292..f2c73683cfcb7 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
 obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
new file mode 100644
index 0000000000000..750f2172ef08d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI SN65DSI83,84,85 driver
+ *
+ * Currently supported:
+ * - SN65DSI83
+ *   = 1x Single-link DSI ~ 1x Single-link LVDS
+ *   - Supported
+ *   - Single-link LVDS mode tested
+ * - SN65DSI84
+ *   = 1x Single-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ *   - Supported
+ *   - Dual-link LVDS mode tested
+ *   - 2x Single-link LVDS mode unsupported
+ *     (should be easy to add by someone who has the HW)
+ * - SN65DSI85
+ *   = 2x Single-link or 1x Dual-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ *   - Unsupported
+ *     (should be easy to add by someone who has the HW)
+ *
+ * Copyright (C) 2021 Marek Vasut <marex@denx.de>
+ *
+ * Based on previous work of:
+ * Valentin Raevsky <valentin@compulab.co.il>
+ * Philippe Schenker <philippe.schenker@toradex.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* ID registers */
+#define REG_ID(n)				(0x00 + (n))
+/* Reset and clock registers */
+#define REG_RC_RESET				0x09
+#define  REG_RC_RESET_SOFT_RESET		BIT(0)
+#define REG_RC_LVDS_PLL				0x0a
+#define  REG_RC_LVDS_PLL_PLL_EN_STAT		BIT(7)
+#define  REG_RC_LVDS_PLL_LVDS_CLK_RANGE(n)	(((n) & 0x7) << 1)
+#define  REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY	BIT(0)
+#define REG_RC_DSI_CLK				0x0b
+#define  REG_RC_DSI_CLK_DSI_CLK_DIVIDER(n)	(((n) & 0x1f) << 3)
+#define  REG_RC_DSI_CLK_REFCLK_MULTIPLIER(n)	((n) & 0x3)
+#define REG_RC_PLL_EN				0x0d
+#define  REG_RC_PLL_EN_PLL_EN			BIT(0)
+/* DSI registers */
+#define REG_DSI_LANE				0x10
+#define  REG_DSI_LANE_LEFT_RIGHT_PIXELS		BIT(7)	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_DUAL	0	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_2SINGLE	BIT(6)	/* DSI85-only */
+#define  REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE	BIT(5)
+#define  REG_DSI_LANE_CHA_DSI_LANES(n)		(((n) & 0x3) << 3)
+#define  REG_DSI_LANE_CHB_DSI_LANES(n)		(((n) & 0x3) << 1)
+#define  REG_DSI_LANE_SOT_ERR_TOL_DIS		BIT(0)
+#define REG_DSI_EQ				0x11
+#define  REG_DSI_EQ_CHA_DSI_DATA_EQ(n)		(((n) & 0x3) << 6)
+#define  REG_DSI_EQ_CHA_DSI_CLK_EQ(n)		(((n) & 0x3) << 2)
+#define REG_DSI_CLK				0x12
+#define  REG_DSI_CLK_CHA_DSI_CLK_RANGE(n)	((n) & 0xff)
+/* LVDS registers */
+#define REG_LVDS_FMT				0x18
+#define  REG_LVDS_FMT_DE_NEG_POLARITY		BIT(7)
+#define  REG_LVDS_FMT_HS_NEG_POLARITY		BIT(6)
+#define  REG_LVDS_FMT_VS_NEG_POLARITY		BIT(5)
+#define  REG_LVDS_FMT_LVDS_LINK_CFG		BIT(4)	/* 0:AB 1:A-only */
+#define  REG_LVDS_FMT_CHA_24BPP_MODE		BIT(3)
+#define  REG_LVDS_FMT_CHB_24BPP_MODE		BIT(2)
+#define  REG_LVDS_FMT_CHA_24BPP_FORMAT1		BIT(1)
+#define  REG_LVDS_FMT_CHB_24BPP_FORMAT1		BIT(0)
+#define REG_LVDS_VCOM				0x19
+#define  REG_LVDS_VCOM_CHA_LVDS_VOCM		BIT(6)
+#define  REG_LVDS_VCOM_CHB_LVDS_VOCM		BIT(4)
+#define  REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(n)	(((n) & 0x3) << 2)
+#define  REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(n)	((n) & 0x3)
+#define REG_LVDS_LANE				0x1a
+#define  REG_LVDS_LANE_EVEN_ODD_SWAP		BIT(6)
+#define  REG_LVDS_LANE_CHA_REVERSE_LVDS		BIT(5)
+#define  REG_LVDS_LANE_CHB_REVERSE_LVDS		BIT(4)
+#define  REG_LVDS_LANE_CHA_LVDS_TERM		BIT(1)
+#define  REG_LVDS_LANE_CHB_LVDS_TERM		BIT(0)
+#define REG_LVDS_CM				0x1b
+#define  REG_LVDS_CM_CHA_LVDS_CM_ADJUST(n)	(((n) & 0x3) << 4)
+#define  REG_LVDS_CM_CHB_LVDS_CM_ADJUST(n)	((n) & 0x3)
+/* Video registers */
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW	0x20
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH	0x21
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW	0x24
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH	0x25
+#define REG_VID_CHA_SYNC_DELAY_LOW		0x28
+#define REG_VID_CHA_SYNC_DELAY_HIGH		0x29
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW	0x2c
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH	0x2d
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW	0x30
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH	0x31
+#define REG_VID_CHA_HORIZONTAL_BACK_PORCH	0x34
+#define REG_VID_CHA_VERTICAL_BACK_PORCH		0x36
+#define REG_VID_CHA_HORIZONTAL_FRONT_PORCH	0x38
+#define REG_VID_CHA_VERTICAL_FRONT_PORCH	0x3a
+#define REG_VID_CHA_TEST_PATTERN		0x3c
+/* IRQ registers */
+#define REG_IRQ_GLOBAL				0xe0
+#define  REG_IRQ_GLOBAL_IRQ_EN			BIT(0)
+#define REG_IRQ_EN				0xe1
+#define  REG_IRQ_EN_CHA_SYNCH_ERR_EN		BIT(7)
+#define  REG_IRQ_EN_CHA_CRC_ERR_EN		BIT(6)
+#define  REG_IRQ_EN_CHA_UNC_ECC_ERR_EN		BIT(5)
+#define  REG_IRQ_EN_CHA_COR_ECC_ERR_EN		BIT(4)
+#define  REG_IRQ_EN_CHA_LLP_ERR_EN		BIT(3)
+#define  REG_IRQ_EN_CHA_SOT_BIT_ERR_EN		BIT(2)
+#define  REG_IRQ_EN_CHA_PLL_UNLOCK_EN		BIT(0)
+#define REG_IRQ_STAT				0xe5
+#define  REG_IRQ_STAT_CHA_SYNCH_ERR		BIT(7)
+#define  REG_IRQ_STAT_CHA_CRC_ERR		BIT(6)
+#define  REG_IRQ_STAT_CHA_UNC_ECC_ERR		BIT(5)
+#define  REG_IRQ_STAT_CHA_COR_ECC_ERR		BIT(4)
+#define  REG_IRQ_STAT_CHA_LLP_ERR		BIT(3)
+#define  REG_IRQ_STAT_CHA_SOT_BIT_ERR		BIT(2)
+#define  REG_IRQ_STAT_CHA_PLL_UNLOCK		BIT(0)
+
+enum sn65dsi83_model {
+	MODEL_SN65DSI83,
+	MODEL_SN65DSI84,
+};
+
+struct sn65dsi83 {
+	struct drm_bridge		bridge;
+	struct drm_display_mode		mode;
+	struct device			*dev;
+	struct regmap			*regmap;
+	struct device_node		*host_node;
+	struct mipi_dsi_device		*dsi;
+	struct drm_bridge		*panel_bridge;
+	struct gpio_desc		*enable_gpio;
+	int				dsi_lanes;
+	bool				lvds_dual_link;
+	bool				lvds_dual_link_even_odd_swap;
+	bool				lvds_format_24bpp;
+	bool				lvds_format_jeida;
+};
+
+static const struct regmap_range sn65dsi83_readable_ranges[] = {
+	regmap_reg_range(REG_ID(0), REG_ID(8)),
+	regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_DSI_CLK),
+	regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+	regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+	regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+	regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			 REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			 REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+	regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+			 REG_VID_CHA_SYNC_DELAY_HIGH),
+	regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+			 REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+			 REG_VID_CHA_VERTICAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+			 REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+			 REG_VID_CHA_VERTICAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+	regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_readable_table = {
+	.yes_ranges = sn65dsi83_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_readable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_writeable_ranges[] = {
+	regmap_reg_range(REG_RC_RESET, REG_RC_DSI_CLK),
+	regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+	regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+	regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+	regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			 REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			 REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+	regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+			 REG_VID_CHA_SYNC_DELAY_HIGH),
+	regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			 REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+			 REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+			 REG_VID_CHA_VERTICAL_BACK_PORCH),
+	regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+			 REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+			 REG_VID_CHA_VERTICAL_FRONT_PORCH),
+	regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+	regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_writeable_table = {
+	.yes_ranges = sn65dsi83_writeable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_writeable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_volatile_ranges[] = {
+	regmap_reg_range(REG_RC_RESET, REG_RC_RESET),
+	regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_LVDS_PLL),
+	regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_volatile_table = {
+	.yes_ranges = sn65dsi83_volatile_ranges,
+	.n_yes_ranges = ARRAY_SIZE(sn65dsi83_volatile_ranges),
+};
+
+static const struct regmap_config sn65dsi83_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.rd_table = &sn65dsi83_readable_table,
+	.wr_table = &sn65dsi83_writeable_table,
+	.volatile_table = &sn65dsi83_volatile_table,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = REG_IRQ_STAT,
+};
+
+static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct sn65dsi83, bridge);
+}
+
+static int sn65dsi83_attach(struct drm_bridge *bridge,
+			    enum drm_bridge_attach_flags flags)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+	struct device *dev = ctx->dev;
+	struct mipi_dsi_device *dsi;
+	struct mipi_dsi_host *host;
+	int ret = 0;
+
+	const struct mipi_dsi_device_info info = {
+		.type = "sn65dsi83",
+		.channel = 0,
+		.node = NULL,
+	};
+
+	host = of_find_mipi_dsi_host_by_node(ctx->host_node);
+	if (!host) {
+		dev_err(dev, "failed to find dsi host\n");
+		return -EPROBE_DEFER;
+	}
+
+	dsi = mipi_dsi_device_register_full(host, &info);
+	if (IS_ERR(dsi)) {
+		return dev_err_probe(dev, PTR_ERR(dsi),
+				     "failed to create dsi device\n");
+	}
+
+	ctx->dsi = dsi;
+
+	dsi->lanes = ctx->dsi_lanes;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		dev_err(dev, "failed to attach dsi to host\n");
+		goto err_dsi_attach;
+	}
+
+	return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+				 &ctx->bridge, flags);
+
+err_dsi_attach:
+	mipi_dsi_device_unregister(dsi);
+	return ret;
+}
+
+static void sn65dsi83_pre_enable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/*
+	 * Reset the chip, pull EN line low for t_reset=10ms,
+	 * then high for t_en=1ms.
+	 */
+	regcache_mark_dirty(ctx->regmap);
+	gpiod_set_value(ctx->enable_gpio, 0);
+	usleep_range(10000, 11000);
+	gpiod_set_value(ctx->enable_gpio, 1);
+	usleep_range(1000, 1100);
+}
+
+static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx)
+{
+	/*
+	 * The encoding of the LVDS_CLK_RANGE is as follows:
+	 * 000 - 25 MHz <= LVDS_CLK < 37.5 MHz
+	 * 001 - 37.5 MHz <= LVDS_CLK < 62.5 MHz
+	 * 010 - 62.5 MHz <= LVDS_CLK < 87.5 MHz
+	 * 011 - 87.5 MHz <= LVDS_CLK < 112.5 MHz
+	 * 100 - 112.5 MHz <= LVDS_CLK < 137.5 MHz
+	 * 101 - 137.5 MHz <= LVDS_CLK <= 154 MHz
+	 * which is a range of 12.5MHz..162.5MHz in 25MHz steps, except that
+	 * the ends of the ranges are clamped to the supported range. Since
+	 * sn65dsi83_mode_valid() already filters the valid modes and limits
+	 * the clock to 25..154 MHz, the range calculation can be simplified
+	 * as follows:
+	 */
+	int mode_clock = ctx->mode.clock;
+
+	if (ctx->lvds_dual_link)
+		mode_clock /= 2;
+
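+	/*
+	 * For example, a 74.25 MHz LVDS clock gives
+	 * (74250 - 12500) / 25000 = 2, i.e. LVDS_CLK_RANGE 010.
+	 */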
+	return (mode_clock - 12500) / 25000;
+}
+
+static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx)
+{
+	/*
+	 * The encoding of the CHA_DSI_CLK_RANGE is as follows:
+	 * 0x00 through 0x07 - Reserved
+	 * 0x08 - 40 <= DSI_CLK < 45 MHz
+	 * 0x09 - 45 <= DSI_CLK < 50 MHz
+	 * ...
+	 * 0x63 - 495 <= DSI_CLK < 500 MHz
+	 * 0x64 - 500 MHz
+	 * 0x65 through 0xFF - Reserved
+	 * which is DSI clock in 5 MHz steps, clamped to 40..500 MHz.
+	 * The DSI clock is calculated as:
+	 *  DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
+	 * the 2 is there because the bus is DDR.
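+	 *
+	 * For example, a 148.5 MHz pixel clock at RGB888 (24 bpp) over four
+	 * lanes gives DSI_CLK = 148500 * 24 / 4 / 2 = 445.5 MHz, which
+	 * encodes as DIV_ROUND_UP(445500, 5000) = 90 (0x5a).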
+	 */
+	return DIV_ROUND_UP(clamp((unsigned int)ctx->mode.clock *
+			    mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+			    ctx->dsi_lanes / 2, 40000U, 500000U), 5000U);
+}
+
+static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+{
+	/* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */
+	unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format);
+
+	dsi_div /= ctx->dsi_lanes;
+
+	if (!ctx->lvds_dual_link)
+		dsi_div /= 2;
+
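+	/*
+	 * For example, RGB888 (24 bpp) over four lanes on single-link LVDS:
+	 * 24 / 4 / 2 = 3, so the register value is 3 - 1 = 2.
+	 */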
+	return dsi_div - 1;
+}
+
+static void sn65dsi83_enable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+	unsigned int pval;
+	u16 val;
+	int ret;
+
+	/* Clear reset, disable PLL */
+	regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+
+	/* Reference clock derived from DSI link clock. */
+	regmap_write(ctx->regmap, REG_RC_LVDS_PLL,
+		     REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx)) |
+		     REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY);
+	regmap_write(ctx->regmap, REG_DSI_CLK,
+		     REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx)));
+	regmap_write(ctx->regmap, REG_RC_DSI_CLK,
+		     REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx)));
+
+	/* Set number of DSI lanes and LVDS link config. */
+	regmap_write(ctx->regmap, REG_DSI_LANE,
+		     REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE |
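+		     /* CHA_DSI_LANES is inverted: 0 = 4 lanes, 3 = 1 lane */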
+		     REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi_lanes - 1)) |
+		     /* CHB is DSI85-only, set to default on DSI83/DSI84 */
+		     REG_DSI_LANE_CHB_DSI_LANES(3));
+	/* No equalization. */
+	regmap_write(ctx->regmap, REG_DSI_EQ, 0x00);
+
+	/* Set up sync signal polarity. */
+	val = (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC ?
+	       REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
+	      (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC ?
+	       REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+
+	/* Set up bits-per-pixel, 18bpp or 24bpp. */
+	if (ctx->lvds_format_24bpp) {
+		val |= REG_LVDS_FMT_CHA_24BPP_MODE;
+		if (ctx->lvds_dual_link)
+			val |= REG_LVDS_FMT_CHB_24BPP_MODE;
+	}
+
+	/* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */
+	if (ctx->lvds_format_jeida) {
+		val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1;
+		if (ctx->lvds_dual_link)
+			val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1;
+	}
+
+	/* Set up LVDS output config (DSI84,DSI85) */
+	if (!ctx->lvds_dual_link)
+		val |= REG_LVDS_FMT_LVDS_LINK_CFG;
+
+	regmap_write(ctx->regmap, REG_LVDS_FMT, val);
+	regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+	regmap_write(ctx->regmap, REG_LVDS_LANE,
+		     (ctx->lvds_dual_link_even_odd_swap ?
+		      REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
+		     REG_LVDS_LANE_CHA_LVDS_TERM |
+		     REG_LVDS_LANE_CHB_LVDS_TERM);
+	regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
+
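+	/* Write 16-bit values little-endian into LOW/HIGH register pairs. */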
+	val = cpu_to_le16(ctx->mode.hdisplay);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+			  &val, 2);
+	val = cpu_to_le16(ctx->mode.vdisplay);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+			  &val, 2);
+	/* 32 + 1 pixel clock to ensure proper operation */
+	val = cpu_to_le16(32 + 1);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &val, 2);
+	val = cpu_to_le16(ctx->mode.hsync_end - ctx->mode.hsync_start);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+			  &val, 2);
+	val = cpu_to_le16(ctx->mode.vsync_end - ctx->mode.vsync_start);
+	regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+			  &val, 2);
+	regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+		     ctx->mode.htotal - ctx->mode.hsync_end);
+	regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
+		     ctx->mode.vtotal - ctx->mode.vsync_end);
+	regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+		     ctx->mode.hsync_start - ctx->mode.hdisplay);
+	regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
+		     ctx->mode.vsync_start - ctx->mode.vdisplay);
+	regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
+
+	/* Enable PLL */
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, REG_RC_PLL_EN_PLL_EN);
+	usleep_range(3000, 4000);
+	ret = regmap_read_poll_timeout(ctx->regmap, REG_RC_LVDS_PLL, pval,
+				       pval & REG_RC_LVDS_PLL_PLL_EN_STAT,
+				       1000, 100000);
+	if (ret) {
+		dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+		/* On failure, disable PLL again and exit. */
+		regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+		return;
+	}
+
+	/* Trigger reset after CSR register update. */
+	regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+
+	/* Clear all errors that got asserted during initialization. */
+	regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+	regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+}
+
+static void sn65dsi83_disable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/* Clear reset, disable PLL */
+	regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+	regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+}
+
+static void sn65dsi83_post_disable(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	/* Put the chip in reset, pull EN line low. */
+	gpiod_set_value(ctx->enable_gpio, 0);
+}
+
+static enum drm_mode_status
+sn65dsi83_mode_valid(struct drm_bridge *bridge,
+		     const struct drm_display_info *info,
+		     const struct drm_display_mode *mode)
+{
+	/* LVDS output clock range 25..154 MHz */
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (mode->clock > 154000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void sn65dsi83_mode_set(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       const struct drm_display_mode *adj)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	ctx->mode = *adj;
+}
+
+static bool sn65dsi83_mode_fixup(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adj)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+	u32 input_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+	struct drm_encoder *encoder = bridge->encoder;
+	struct drm_device *ddev = encoder->dev;
+	struct drm_connector *connector;
+
+	/* The DSI format is always RGB888_1X24 */
+	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+		switch (connector->display_info.bus_formats[0]) {
+		case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+			ctx->lvds_format_24bpp = false;
+			ctx->lvds_format_jeida = true;
+			break;
+		case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+			ctx->lvds_format_24bpp = true;
+			ctx->lvds_format_jeida = true;
+			break;
+		case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+			ctx->lvds_format_24bpp = true;
+			ctx->lvds_format_jeida = false;
+			break;
+		default:
+			/*
+			 * Some bridges still don't set the correct
+			 * LVDS bus pixel format, use SPWG24 default
+			 * format until those are fixed.
+			 */
+			ctx->lvds_format_24bpp = true;
+			ctx->lvds_format_jeida = false;
+			dev_warn(ctx->dev,
+				 "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
+				 connector->display_info.bus_formats[0]);
+			break;
+		}
+
+		drm_display_info_set_bus_formats(&connector->display_info,
+						 &input_bus_format, 1);
+	}
+
+	return true;
+}
+
+static const struct drm_bridge_funcs sn65dsi83_funcs = {
+	.attach		= sn65dsi83_attach,
+	.pre_enable	= sn65dsi83_pre_enable,
+	.enable		= sn65dsi83_enable,
+	.disable	= sn65dsi83_disable,
+	.post_disable	= sn65dsi83_post_disable,
+	.mode_valid	= sn65dsi83_mode_valid,
+	.mode_set	= sn65dsi83_mode_set,
+	.mode_fixup	= sn65dsi83_mode_fixup,
+};
+
+static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
+{
+	struct drm_bridge *panel_bridge;
+	struct device *dev = ctx->dev;
+	struct device_node *endpoint;
+	struct drm_panel *panel;
+	int ret;
+
+	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+	ctx->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+	ctx->host_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+
+	if (ctx->dsi_lanes < 1 || ctx->dsi_lanes > 4)
+		return -EINVAL;
+	if (!ctx->host_node)
+		return -ENODEV;
+
+	ctx->lvds_dual_link = false;
+	ctx->lvds_dual_link_even_odd_swap = false;
+	if (model != MODEL_SN65DSI83) {
+		struct device_node *port2, *port3;
+		int dual_link;
+
+		port2 = of_graph_get_port_by_id(dev->of_node, 2);
+		port3 = of_graph_get_port_by_id(dev->of_node, 3);
+		dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
+		of_node_put(port2);
+		of_node_put(port3);
+
+		if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
+			ctx->lvds_dual_link = true;
+			/* Odd pixels to LVDS Channel A, even pixels to B */
+			ctx->lvds_dual_link_even_odd_swap = false;
+		} else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+			ctx->lvds_dual_link = true;
+			/* Even pixels to LVDS Channel A, odd pixels to B */
+			ctx->lvds_dual_link_even_odd_swap = true;
+		}
+	}
+
+	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
+	if (ret < 0)
+		return ret;
+	if (panel) {
+		panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+		if (IS_ERR(panel_bridge))
+			return PTR_ERR(panel_bridge);
+	}
+
+	ctx->panel_bridge = panel_bridge;
+
+	return 0;
+}
+
+static int sn65dsi83_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	enum sn65dsi83_model model;
+	struct sn65dsi83 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+
+	if (dev->of_node) {
+		model = (enum sn65dsi83_model)(uintptr_t)
+			of_device_get_match_data(dev);
+	} else {
+		model = id->driver_data;
+	}
+
+	ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->enable_gpio))
+		return PTR_ERR(ctx->enable_gpio);
+
+	ret = sn65dsi83_parse_dt(ctx, model);
+	if (ret)
+		return ret;
+
+	ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
+	if (IS_ERR(ctx->regmap))
+		return PTR_ERR(ctx->regmap);
+
+	dev_set_drvdata(dev, ctx);
+	i2c_set_clientdata(client, ctx);
+
+	ctx->bridge.funcs = &sn65dsi83_funcs;
+	ctx->bridge.of_node = dev->of_node;
+	drm_bridge_add(&ctx->bridge);
+
+	return 0;
+}
+
+static int sn65dsi83_remove(struct i2c_client *client)
+{
+	struct sn65dsi83 *ctx = i2c_get_clientdata(client);
+
+	mipi_dsi_detach(ctx->dsi);
+	mipi_dsi_device_unregister(ctx->dsi);
+	drm_bridge_remove(&ctx->bridge);
+	of_node_put(ctx->host_node);
+
+	return 0;
+}
+
+static struct i2c_device_id sn65dsi83_id[] = {
+	{ "ti,sn65dsi83", MODEL_SN65DSI83 },
+	{ "ti,sn65dsi84", MODEL_SN65DSI84 },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, sn65dsi83_id);
+
+static const struct of_device_id sn65dsi83_match_table[] = {
+	{ .compatible = "ti,sn65dsi83", .data = (void *)MODEL_SN65DSI83 },
+	{ .compatible = "ti,sn65dsi84", .data = (void *)MODEL_SN65DSI84 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
+
+static struct i2c_driver sn65dsi83_driver = {
+	.probe = sn65dsi83_probe,
+	.remove = sn65dsi83_remove,
+	.id_table = sn65dsi83_id,
+	.driver = {
+		.name = "sn65dsi83",
+		.of_match_table = sn65dsi83_match_table,
+	},
+};
+module_i2c_driver(sn65dsi83_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("TI SN65DSI83 DSI to LVDS bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 79a50ef1250fd..546599f19a93a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,6 +28,7 @@
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 
+#include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/mem_encrypt.h>
@@ -35,6 +36,9 @@
 
 #include <drm/drm_cache.h>
 
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
 #if defined(CONFIG_X86)
 #include <asm/smp.h>
 
@@ -209,3 +213,147 @@ bool drm_need_swiotlb(int dma_bits)
 	return max_iomem > ((u64)1 << dma_bits);
 }
 EXPORT_SYMBOL(drm_need_swiotlb);
+
+static void memcpy_fallback(struct dma_buf_map *dst,
+			    const struct dma_buf_map *src,
+			    unsigned long len)
+{
+	if (!dst->is_iomem && !src->is_iomem) {
+		memcpy(dst->vaddr, src->vaddr, len);
+	} else if (!src->is_iomem) {
+		dma_buf_map_memcpy_to(dst, src->vaddr, len);
+	} else if (!dst->is_iomem) {
+		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+	} else {
+		/*
+		 * Bounce size is not performance tuned, but using a
+		 * bounce buffer like this is significantly faster than
+		 * resorting to ioreadxx() + iowritexx().
+		 */
+		char bounce[MEMCPY_BOUNCE_SIZE];
+		void __iomem *_src = src->vaddr_iomem;
+		void __iomem *_dst = dst->vaddr_iomem;
+
+		while (len >= MEMCPY_BOUNCE_SIZE) {
+			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+			_src += MEMCPY_BOUNCE_SIZE;
+			_dst += MEMCPY_BOUNCE_SIZE;
+			len -= MEMCPY_BOUNCE_SIZE;
+		}
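+		/* Copy the tail, which is smaller than the bounce buffer. */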
+		if (len) {
+			memcpy_fromio(bounce, _src, len);
+			memcpy_toio(_dst, bounce, len);
+		}
+	}
+}
+
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
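+	/* @len is in 16-byte units; copy 64 bytes per iteration here. */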
+	while (len >= 4) {
+		asm("movntdqa	(%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movaps %%xmm0,   (%1)\n"
+		    "movaps %%xmm1, 16(%1)\n"
+		    "movaps %%xmm2, 32(%1)\n"
+		    "movaps %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movaps %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+		memcpy(dst, src, len);
+	else if (likely(len))
+		__memcpy_ntdqa(dst, src, len >> 4);
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area to transfer in bytes
+ *
+ * Tries an arch-optimized memcpy suited to reading out of a WC region,
+ * and if no such beast is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len)
+{
+	if (WARN_ON(in_interrupt())) {
+		memcpy_fallback(dst, src, len);
+		return;
+	}
+
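+	/* Fast path: SSE4.1 movntdqa, enabled in drm_memcpy_init_early(). */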
+	if (static_branch_likely(&has_movntdqa)) {
+		__drm_memcpy_from_wc(dst->is_iomem ?
+				     (void __force *)dst->vaddr_iomem :
+				     dst->vaddr,
+				     src->is_iomem ?
+				     (void const __force *)src->vaddr_iomem :
+				     src->vaddr,
+				     len);
+		return;
+	}
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+/*
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+	/*
+	 * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+	 * instructions. So don't enable movntdqa in hypervisor guests.
+	 */
+	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len)
+{
+	WARN_ON(in_interrupt());
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+void drm_memcpy_init_early(void)
+{
+}
+#endif /* CONFIG_X86 */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3d8d68a98b95b..8804ec7d32156 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/srcu.h>
 
+#include <drm/drm_cache.h>
 #include <drm/drm_client.h>
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_drv.h>
@@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
 
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
+	drm_memcpy_init_early();
 
 	ret = drm_sysfs_init();
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9989425e9875a..d62fb1a3c9167 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
-						  true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@@ -1375,12 +1374,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 
 	if (!write) {
 		struct dma_fence *fence =
-			dma_resv_get_excl_rcu(obj->resv);
+			dma_resv_get_excl_unlocked(obj->resv);
 
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
 
-	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
-						&fence_count, &fences);
+	ret = dma_resv_get_fences(obj->resv, NULL,
+				  &fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a005c5a0ba46a..a27135084ae5c 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = dma_resv_get_excl_rcu(obj->resv);
+	fence = dma_resv_get_excl_unlocked(obj->resv);
 	drm_atomic_set_fence_for_plane(state, fence);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4a24af2c5c43e..d53388199f34c 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -104,8 +104,7 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
  * @size: size of the object to allocate
  *
  * This function creates a CMA GEM object and allocates a contiguous chunk of
- * memory as backing store. The backing memory has the writecombine attribute
- * set.
+ * memory as backing store.
  *
  * Returns:
  * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index b14bed8be771d..ecf3d2a54a98c 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 	const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
 	drm_printf_indent(p, indent, "placement=");
-	drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+	drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
 	drm_printf(p, "\n");
 
-	if (bo->mem.bus.is_iomem)
+	if (bo->resource->bus.is_iomem)
 		drm_printf_indent(p, indent, "bus.offset=%lx\n",
-				  (unsigned long)bo->mem.bus.offset);
+				  (unsigned long)bo->resource->bus.offset);
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);
 
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 797200315854f..2a1229b8364e3 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -17,6 +17,8 @@
 #include <drm/drm_prime.h>
 #include <drm/drm_simple_kms_helper.h>
 
+#include <drm/ttm/ttm_range_manager.h>
+
 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
 /**
@@ -248,10 +250,11 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
-	return gbo->bo.mem.start;
+	return gbo->bo.resource->start;
 }
 
 /**
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index db69f19ab5bca..b8fa6ed3dd738 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled_rcu(obj->resv,
-							  write))
+		if (!dma_resv_test_signaled(obj->resv, write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = dma_resv_wait_timeout_rcu(obj->resv,
-							  write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
@@ -461,7 +459,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			off, etnaviv_obj->vaddr, obj->size);
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
@@ -471,7 +469,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_excl_fence(robj);
 	if (fence)
 		etnaviv_gem_describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index d05c359945799..d53856d7a747b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
-								&bo->nr_shared,
-								&bo->shared);
+			ret = dma_resv_get_fences(robj, &bo->excl,
+						  &bo->nr_shared,
+						  &bo->shared);
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = dma_resv_get_excl_rcu(robj);
+			bo->excl = dma_resv_get_excl_unlocked(robj);
 		}
 
 	}
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 431c5d32f9a47..9b5e6f94e5585 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -344,7 +344,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
 }
 
 /**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
  *
  * @ctx: display and enhancement controller context
  * @win: window to protect registers for
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 4f2b7551b2515..9ae8689353579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -88,7 +88,7 @@ void exynos_drm_ipp_unregister(struct device *dev,
 }
 
 /**
- * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
+ * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
@@ -136,7 +136,7 @@ static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
 }
 
 /**
- * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
+ * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
  * @dev: DRM device
  * @data: ioctl data
  * @file_priv: DRM file info
diff --git a/drivers/gpu/drm/hyperv/Makefile b/drivers/gpu/drm/hyperv/Makefile
new file mode 100644
index 0000000000000..265f12f2c6600
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+hyperv_drm-y := \
+	hyperv_drm_drv.o \
+	hyperv_drm_modeset.o \
+	hyperv_drm_proto.o
+
+obj-$(CONFIG_DRM_HYPERV) += hyperv_drm.o
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
new file mode 100644
index 0000000000000..886add4f9cd0f
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#ifndef _HYPERV_DRM_H_
+#define _HYPERV_DRM_H_
+
+#define VMBUS_MAX_PACKET_SIZE 0x4000
+
+struct hyperv_drm_device {
+	/* drm */
+	struct drm_device dev;
+	struct drm_simple_display_pipe pipe;
+	struct drm_connector connector;
+
+	/* mode */
+	u32 screen_width_max;
+	u32 screen_height_max;
+	u32 preferred_width;
+	u32 preferred_height;
+	u32 screen_depth;
+
+	/* hw */
+	struct resource *mem;
+	void __iomem *vram;
+	unsigned long fb_base;
+	unsigned long fb_size;
+	struct completion wait;
+	u32 synthvid_version;
+	u32 mmio_megabytes;
+	bool dirt_needed;
+
+	u8 init_buf[VMBUS_MAX_PACKET_SIZE];
+	u8 recv_buf[VMBUS_MAX_PACKET_SIZE];
+
+	struct hv_device *hdev;
+};
+
+#define to_hv(_dev) container_of(_dev, struct hyperv_drm_device, dev)
+
+/* hyperv_drm_modeset */
+int hyperv_mode_config_init(struct hyperv_drm_device *hv);
+
+/* hyperv_drm_proto */
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+			    u32 w, u32 h, u32 pitch);
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
+int hyperv_connect_vsp(struct hv_device *hdev);
+
+#endif
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
new file mode 100644
index 0000000000000..eb06c92c4bfdc
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/efi.h>
+#include <linux/hyperv.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define DRIVER_NAME "hyperv_drm"
+#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
+#define DRIVER_DATE "2020"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
+DEFINE_DRM_GEM_FOPS(hv_fops);
+
+static struct drm_driver hyperv_driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+	.name		 = DRIVER_NAME,
+	.desc		 = DRIVER_DESC,
+	.date		 = DRIVER_DATE,
+	.major		 = DRIVER_MAJOR,
+	.minor		 = DRIVER_MINOR,
+
+	.fops		 = &hv_fops,
+	DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int hyperv_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	return 0;
+}
+
+static void hyperv_pci_remove(struct pci_dev *pdev)
+{
+}
+
+static const struct pci_device_id hyperv_pci_tbl[] = {
+	{
+		.vendor = PCI_VENDOR_ID_MICROSOFT,
+		.device = PCI_DEVICE_ID_HYPERV_VIDEO,
+	},
+	{ /* end of list */ }
+};
+
+/*
+ * PCI stub to support gen1 VM.
+ */
+static struct pci_driver hyperv_pci_driver = {
+	.name =		KBUILD_MODNAME,
+	.id_table =	hyperv_pci_tbl,
+	.probe =	hyperv_pci_probe,
+	.remove =	hyperv_pci_remove,
+};
+
+static int hyperv_setup_gen1(struct hyperv_drm_device *hv)
+{
+	struct drm_device *dev = &hv->dev;
+	struct pci_dev *pdev;
+	int ret;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+	if (!pdev) {
+		drm_err(dev, "Unable to find PCI Hyper-V video\n");
+		return -ENODEV;
+	}
+
+	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hypervdrmfb");
+	if (ret) {
+		drm_err(dev, "Not able to remove boot fb\n");
+		return ret;
+	}
+
+	if (pci_request_region(pdev, 0, DRIVER_NAME) != 0)
+		drm_warn(dev, "Cannot request framebuffer, boot fb still active?\n");
+
+	if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) {
+		drm_err(dev, "Resource at bar 0 is not IORESOURCE_MEM\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	hv->fb_base = pci_resource_start(pdev, 0);
+	hv->fb_size = pci_resource_len(pdev, 0);
+	if (!hv->fb_base) {
+		drm_err(dev, "Resource not available\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	hv->fb_size = min(hv->fb_size,
+			  (unsigned long)hv->mmio_megabytes * 1024 * 1024);
+	hv->vram = devm_ioremap(&pdev->dev, hv->fb_base, hv->fb_size);
+	if (!hv->vram) {
+		drm_err(dev, "Failed to map vram\n");
+		ret = -ENOMEM;
+	}
+
+error:
+	pci_dev_put(pdev);
+	return ret;
+}
+
+static int hyperv_setup_gen2(struct hyperv_drm_device *hv,
+			     struct hv_device *hdev)
+{
+	struct drm_device *dev = &hv->dev;
+	int ret;
+
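+	/*
+	 * Kick out the firmware framebuffer driver (e.g. efifb) that the
+	 * boot stub placed over the same synthetic video memory.
+	 */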
+	drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
+						     screen_info.lfb_size,
+						     false,
+						     "hypervdrmfb");
+
+	hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
+
+	ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
+				  true);
+	if (ret) {
+		drm_err(dev, "Failed to allocate mmio\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Map the VRAM cacheable for performance. This is also required for VM
+	 * connect to display properly for ARM64 Linux VM, as the host also maps
+	 * the VRAM cacheable.
+	 */
+	hv->vram = ioremap_cache(hv->mem->start, hv->fb_size);
+	if (!hv->vram) {
+		drm_err(dev, "Failed to map vram\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	hv->fb_base = hv->mem->start;
+	return 0;
+
+error:
+	vmbus_free_mmio(hv->mem->start, hv->fb_size);
+	return ret;
+}
+
+static int hyperv_vmbus_probe(struct hv_device *hdev,
+			      const struct hv_vmbus_device_id *dev_id)
+{
+	struct hyperv_drm_device *hv;
+	struct drm_device *dev;
+	int ret;
+
+	hv = devm_drm_dev_alloc(&hdev->device, &hyperv_driver,
+				struct hyperv_drm_device, dev);
+	if (IS_ERR(hv))
+		return PTR_ERR(hv);
+
+	dev = &hv->dev;
+	init_completion(&hv->wait);
+	hv_set_drvdata(hdev, hv);
+	hv->hdev = hdev;
+
+	ret = hyperv_connect_vsp(hdev);
+	if (ret) {
+		drm_err(dev, "Failed to connect to vmbus.\n");
+		goto err_hv_set_drv_data;
+	}
+
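+	/*
+	 * Gen2 VMs boot via EFI and carry no PCI video device; gen1 VMs
+	 * expose the framebuffer through a PCI BAR instead.
+	 */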
+	if (efi_enabled(EFI_BOOT))
+		ret = hyperv_setup_gen2(hv, hdev);
+	else
+		ret = hyperv_setup_gen1(hv);
+
+	if (ret)
+		goto err_vmbus_close;
+
+	/*
+	 * This only needs to be done once at init and on resume. Failing to
+	 * update the vram location is not fatal; the device will then only
+	 * refresh the dirty area up to the preferred resolution.
+	 */
+	ret = hyperv_update_vram_location(hdev, hv->fb_base);
+	if (ret)
+		drm_warn(dev, "Failed to update vram location.\n");
+
+	hv->dirt_needed = true;
+
+	ret = hyperv_mode_config_init(hv);
+	if (ret)
+		goto err_vmbus_close;
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		drm_err(dev, "Failed to register drm driver.\n");
+		goto err_vmbus_close;
+	}
+
+	drm_fbdev_generic_setup(dev, 0);
+
+	return 0;
+
+err_vmbus_close:
+	vmbus_close(hdev->channel);
+err_hv_set_drv_data:
+	hv_set_drvdata(hdev, NULL);
+	return ret;
+}
+
+static int hyperv_vmbus_remove(struct hv_device *hdev)
+{
+	struct drm_device *dev = hv_get_drvdata(hdev);
+	struct hyperv_drm_device *hv = to_hv(dev);
+
+	drm_dev_unplug(dev);
+	drm_atomic_helper_shutdown(dev);
+	vmbus_close(hdev->channel);
+	hv_set_drvdata(hdev, NULL);
+	vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+	return 0;
+}
+
+static int hyperv_vmbus_suspend(struct hv_device *hdev)
+{
+	struct drm_device *dev = hv_get_drvdata(hdev);
+	int ret;
+
+	ret = drm_mode_config_helper_suspend(dev);
+	if (ret)
+		return ret;
+
+	vmbus_close(hdev->channel);
+
+	return 0;
+}
+
+static int hyperv_vmbus_resume(struct hv_device *hdev)
+{
+	struct drm_device *dev = hv_get_drvdata(hdev);
+	struct hyperv_drm_device *hv = to_hv(dev);
+	int ret;
+
+	ret = hyperv_connect_vsp(hdev);
+	if (ret)
+		return ret;
+
+	ret = hyperv_update_vram_location(hdev, hv->fb_base);
+	if (ret)
+		return ret;
+
+	return drm_mode_config_helper_resume(dev);
+}
+
+static const struct hv_vmbus_device_id hyperv_vmbus_tbl[] = {
+	/* Synthetic Video Device GUID */
+	{HV_SYNTHVID_GUID},
+	{}
+};
+
+static struct hv_driver hyperv_hv_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = hyperv_vmbus_tbl,
+	.probe = hyperv_vmbus_probe,
+	.remove = hyperv_vmbus_remove,
+	.suspend = hyperv_vmbus_suspend,
+	.resume = hyperv_vmbus_resume,
+	.driver = {
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+static int __init hyperv_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&hyperv_pci_driver);
+	if (ret != 0)
+		return ret;
+
+	return vmbus_driver_register(&hyperv_hv_driver);
+}
+
+static void __exit hyperv_exit(void)
+{
+	vmbus_driver_unregister(&hyperv_hv_driver);
+	pci_unregister_driver(&hyperv_pci_driver);
+}
+
+module_init(hyperv_init);
+module_exit(hyperv_exit);
+
+MODULE_DEVICE_TABLE(pci, hyperv_pci_tbl);
+MODULE_DEVICE_TABLE(vmbus, hyperv_vmbus_tbl);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Deepak Rawat <drawat.floss@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Hyper-V synthetic video device");
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
new file mode 100644
index 0000000000000..02718e3e859e7
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
+				    const struct dma_buf_map *map,
+				    struct drm_rect *rect)
+{
+	struct hyperv_drm_device *hv = to_hv(fb->dev);
+	void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+	int idx;
+
+	if (!drm_dev_enter(&hv->dev, &idx))
+		return -ENODEV;
+
+	drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect);
+	drm_dev_exit(idx);
+
+	return 0;
+}
+
+static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
+{
+	struct drm_rect fullscreen = {
+		.x1 = 0,
+		.x2 = fb->width,
+		.y1 = 0,
+		.y2 = fb->height,
+	};
+	return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
+}
+
+static int hyperv_connector_get_modes(struct drm_connector *connector)
+{
+	struct hyperv_drm_device *hv = to_hv(connector->dev);
+	int count;
+
+	count = drm_add_modes_noedid(connector,
+				     connector->dev->mode_config.max_width,
+				     connector->dev->mode_config.max_height);
+	drm_set_preferred_mode(connector, hv->preferred_width,
+			       hv->preferred_height);
+
+	return count;
+}
+
+static const struct drm_connector_helper_funcs hyperv_connector_helper_funcs = {
+	.get_modes = hyperv_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hyperv_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hyperv_conn_init(struct hyperv_drm_device *hv)
+{
+	drm_connector_helper_add(&hv->connector, &hyperv_connector_helper_funcs);
+	return drm_connector_init(&hv->dev, &hv->connector,
+				  &hyperv_connector_funcs,
+				  DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
+static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
+			     struct drm_framebuffer *fb)
+{
+	u32 pitch = w * (hv->screen_depth / 8);
+
+	if (fb)
+		pitch = fb->pitches[0];
+
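+	/* e.g. a 1920x1080 XRGB8888 mode needs 1920 * 4 * 1080 bytes, ~7.9 MiB */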
+	if (pitch * h > hv->fb_size)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
+			       struct drm_crtc_state *crtc_state,
+			       struct drm_plane_state *plane_state)
+{
+	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+
+	hyperv_update_situation(hv->hdev, 1,  hv->screen_depth,
+				crtc_state->mode.hdisplay,
+				crtc_state->mode.vdisplay,
+				plane_state->fb->pitches[0]);
+	hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->map[0]);
+}
+
+static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
+			     struct drm_plane_state *plane_state,
+			     struct drm_crtc_state *crtc_state)
+{
+	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (fb->format->format != DRM_FORMAT_XRGB8888)
+		return -EINVAL;
+
+	if (fb->pitches[0] * fb->height > hv->fb_size)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
+			       struct drm_plane_state *old_state)
+{
+	struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+	struct drm_plane_state *state = pipe->plane.state;
+	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+	struct drm_rect rect;
+
+	if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
+		hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->map[0], &rect);
+		hyperv_update_dirt(hv->hdev, &rect);
+	}
+}
+
+static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
+	.enable	= hyperv_pipe_enable,
+	.check = hyperv_pipe_check,
+	.update	= hyperv_pipe_update,
+	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+};
+
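+/*
+ * Only XRGB8888 is advertised; hyperv_pipe_check() rejects anything else,
+ * which matches the 32bpp depth used on WIN8 and later hosts.
+ */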
+static const uint32_t hyperv_formats[] = {
+	DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
+{
+	int ret;
+
+	ret = drm_simple_display_pipe_init(&hv->dev,
+					   &hv->pipe,
+					   &hyperv_pipe_funcs,
+					   hyperv_formats,
+					   ARRAY_SIZE(hyperv_formats),
+					   NULL,
+					   &hv->connector);
+	if (ret)
+		return ret;
+
+	drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+
+	return 0;
+}
+
+static enum drm_mode_status
+hyperv_mode_valid(struct drm_device *dev,
+		  const struct drm_display_mode *mode)
+{
+	struct hyperv_drm_device *hv = to_hv(dev);
+
+	if (hyperv_check_size(hv, mode->hdisplay, mode->vdisplay, NULL))
+		return MODE_BAD;
+
+	return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs hyperv_mode_config_funcs = {
+	.fb_create = drm_gem_fb_create_with_dirty,
+	.mode_valid = hyperv_mode_valid,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+int hyperv_mode_config_init(struct hyperv_drm_device *hv)
+{
+	struct drm_device *dev = &hv->dev;
+	int ret;
+
+	ret = drmm_mode_config_init(dev);
+	if (ret) {
+		drm_err(dev, "Failed to initialized mode setting.\n");
+		return ret;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = hv->screen_width_max;
+	dev->mode_config.max_height = hv->screen_height_max;
+
+	dev->mode_config.preferred_depth = hv->screen_depth;
+	dev->mode_config.prefer_shadow = 0;
+
+	dev->mode_config.funcs = &hyperv_mode_config_funcs;
+
+	ret = hyperv_conn_init(hv);
+	if (ret) {
+		drm_err(dev, "Failed to initialized connector.\n");
+		return ret;
+	}
+
+	ret = hyperv_pipe_init(hv);
+	if (ret) {
+		drm_err(dev, "Failed to initialized pipe.\n");
+		return ret;
+	}
+
+	drm_mode_config_reset(dev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
new file mode 100644
index 0000000000000..6d4bdccfbd1ad
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ *
+ * Portions of this code are derived from hyperv_fb.c
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define VMBUS_RING_BUFSIZE (256 * 1024)
+#define VMBUS_VSP_TIMEOUT (10 * HZ)
+
+#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
+#define SYNTHVID_VER_GET_MAJOR(ver) ((ver) & 0x0000ffff)
+#define SYNTHVID_VER_GET_MINOR(ver) (((ver) & 0xffff0000) >> 16)
+#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
+#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
+#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
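+/*
+ * The minor revision lives in the high 16 bits and the major in the low 16,
+ * e.g. SYNTHVID_VERSION(3, 5) == 0x00050003.
+ */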
+
+#define SYNTHVID_DEPTH_WIN7 16
+#define SYNTHVID_DEPTH_WIN8 32
+#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
+#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
+#define SYNTHVID_WIDTH_MAX_WIN7 1600
+#define SYNTHVID_HEIGHT_MAX_WIN7 1200
+
+enum pipe_msg_type {
+	PIPE_MSG_INVALID,
+	PIPE_MSG_DATA,
+	PIPE_MSG_MAX
+};
+
+enum synthvid_msg_type {
+	SYNTHVID_ERROR			= 0,
+	SYNTHVID_VERSION_REQUEST	= 1,
+	SYNTHVID_VERSION_RESPONSE	= 2,
+	SYNTHVID_VRAM_LOCATION		= 3,
+	SYNTHVID_VRAM_LOCATION_ACK	= 4,
+	SYNTHVID_SITUATION_UPDATE	= 5,
+	SYNTHVID_SITUATION_UPDATE_ACK	= 6,
+	SYNTHVID_POINTER_POSITION	= 7,
+	SYNTHVID_POINTER_SHAPE		= 8,
+	SYNTHVID_FEATURE_CHANGE		= 9,
+	SYNTHVID_DIRT			= 10,
+	SYNTHVID_RESOLUTION_REQUEST	= 13,
+	SYNTHVID_RESOLUTION_RESPONSE	= 14,
+
+	SYNTHVID_MAX			= 15
+};
+
+struct pipe_msg_hdr {
+	u32 type;
+	u32 size; /* size of message after this field */
+} __packed;
+
+struct hvd_screen_info {
+	u16 width;
+	u16 height;
+} __packed;
+
+struct synthvid_msg_hdr {
+	u32 type;
+	u32 size;  /* size of this header + payload after this field */
+} __packed;
+
+struct synthvid_version_req {
+	u32 version;
+} __packed;
+
+struct synthvid_version_resp {
+	u32 version;
+	u8 is_accepted;
+	u8 max_video_outputs;
+} __packed;
+
+struct synthvid_vram_location {
+	u64 user_ctx;
+	u8 is_vram_gpa_specified;
+	u64 vram_gpa;
+} __packed;
+
+struct synthvid_vram_location_ack {
+	u64 user_ctx;
+} __packed;
+
+struct video_output_situation {
+	u8 active;
+	u32 vram_offset;
+	u8 depth_bits;
+	u32 width_pixels;
+	u32 height_pixels;
+	u32 pitch_bytes;
+} __packed;
+
+struct synthvid_situation_update {
+	u64 user_ctx;
+	u8 video_output_count;
+	struct video_output_situation video_output[1];
+} __packed;
+
+struct synthvid_situation_update_ack {
+	u64 user_ctx;
+} __packed;
+
+struct synthvid_pointer_position {
+	u8 is_visible;
+	u8 video_output;
+	s32 image_x;
+	s32 image_y;
+} __packed;
+
+#define SYNTHVID_CURSOR_MAX_X 96
+#define SYNTHVID_CURSOR_MAX_Y 96
+#define SYNTHVID_CURSOR_ARGB_PIXEL_SIZE 4
+#define SYNTHVID_CURSOR_MAX_SIZE (SYNTHVID_CURSOR_MAX_X * \
+	SYNTHVID_CURSOR_MAX_Y * SYNTHVID_CURSOR_ARGB_PIXEL_SIZE)
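+/* 96 x 96 pixels at 4 bytes each: 36864 bytes for a full cursor image */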
+#define SYNTHVID_CURSOR_COMPLETE (-1)
+
+struct synthvid_pointer_shape {
+	u8 part_idx;
+	u8 is_argb;
+	u32 width; /* SYNTHVID_CURSOR_MAX_X at most */
+	u32 height; /* SYNTHVID_CURSOR_MAX_Y at most */
+	u32 hot_x; /* hotspot relative to upper-left of pointer image */
+	u32 hot_y;
+	u8 data[4];
+} __packed;
+
+struct synthvid_feature_change {
+	u8 is_dirt_needed;
+	u8 is_ptr_pos_needed;
+	u8 is_ptr_shape_needed;
+	u8 is_situ_needed;
+} __packed;
+
+struct rect {
+	s32 x1, y1; /* top left corner */
+	s32 x2, y2; /* bottom right corner, exclusive */
+} __packed;
+
+struct synthvid_dirt {
+	u8 video_output;
+	u8 dirt_count;
+	struct rect rect[1];
+} __packed;
+
+#define SYNTHVID_EDID_BLOCK_SIZE	128
+#define SYNTHVID_MAX_RESOLUTION_COUNT	64
+
+struct synthvid_supported_resolution_req {
+	u8 maximum_resolution_count;
+} __packed;
+
+struct synthvid_supported_resolution_resp {
+	u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
+	u8 resolution_count;
+	u8 default_resolution_index;
+	u8 is_standard;
+	struct hvd_screen_info supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
+} __packed;
+
+struct synthvid_msg {
+	struct pipe_msg_hdr pipe_hdr;
+	struct synthvid_msg_hdr vid_hdr;
+	union {
+		struct synthvid_version_req ver_req;
+		struct synthvid_version_resp ver_resp;
+		struct synthvid_vram_location vram;
+		struct synthvid_vram_location_ack vram_ack;
+		struct synthvid_situation_update situ;
+		struct synthvid_situation_update_ack situ_ack;
+		struct synthvid_pointer_position ptr_pos;
+		struct synthvid_pointer_shape ptr_shape;
+		struct synthvid_feature_change feature_chg;
+		struct synthvid_dirt dirt;
+		struct synthvid_supported_resolution_req resolution_req;
+		struct synthvid_supported_resolution_resp resolution_resp;
+	};
+} __packed;
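+
+/*
+ * On the wire every message is a pipe_msg_hdr followed by vid_hdr.size
+ * payload bytes; hyperv_sendpacket() sets pipe_hdr.size = vid_hdr.size.
+ */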
+
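+/*
+ * e.g. hyperv_version_ge(SYNTHVID_VERSION_WIN10, SYNTHVID_VERSION_WIN8)
+ * is true: equal major (3) and minor 5 >= 2.
+ */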
+static inline bool hyperv_version_ge(u32 ver1, u32 ver2)
+{
+	if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
+	    (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
+	     SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
+		return true;
+
+	return false;
+}
+
+static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg *msg)
+{
+	static atomic64_t request_id = ATOMIC64_INIT(0);
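+	/* Monotonic cookie the host echoes back so completions can be matched. */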
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	int ret;
+
+	msg->pipe_hdr.type = PIPE_MSG_DATA;
+	msg->pipe_hdr.size = msg->vid_hdr.size;
+
+	ret = vmbus_sendpacket(hdev->channel, msg,
+			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+			       atomic64_inc_return(&request_id),
+			       VM_PKT_DATA_INBAND, 0);
+
+	if (ret)
+		drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+
+	return ret;
+}
+
+static int hyperv_negotiate_version(struct hv_device *hdev, u32 ver)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+	struct drm_device *dev = &hv->dev;
+	unsigned long t;
+
+	memset(msg, 0, sizeof(struct synthvid_msg));
+	msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
+	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_version_req);
+	msg->ver_req.version = ver;
+	hyperv_sendpacket(hdev, msg);
+
+	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+	if (!t) {
+		drm_err(dev, "Time out on waiting version response\n");
+		return -ETIMEDOUT;
+	}
+
+	if (!msg->ver_resp.is_accepted) {
+		drm_err(dev, "Version request not accepted\n");
+		return -ENODEV;
+	}
+
+	hv->synthvid_version = ver;
+	drm_info(dev, "Synthvid Version major %d, minor %d\n",
+		 SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
+
+	return 0;
+}
+
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+	struct drm_device *dev = &hv->dev;
+	unsigned long t;
+
+	memset(msg, 0, sizeof(struct synthvid_msg));
+	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
+	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_vram_location);
+	msg->vram.user_ctx = vram_pp;
+	msg->vram.vram_gpa = vram_pp;
+	msg->vram.is_vram_gpa_specified = 1;
+	hyperv_sendpacket(hdev, msg);
+
+	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+	if (!t) {
+		drm_err(dev, "Time out on waiting vram location ack\n");
+		return -ETIMEDOUT;
+	}
+	if (msg->vram_ack.user_ctx != vram_pp) {
+		drm_err(dev, "Unable to set VRAM location\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+			    u32 w, u32 h, u32 pitch)
+{
+	struct synthvid_msg msg;
+
+	memset(&msg, 0, sizeof(struct synthvid_msg));
+
+	msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
+	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_situation_update);
+	msg.situ.user_ctx = 0;
+	msg.situ.video_output_count = 1;
+	msg.situ.video_output[0].active = active;
+	/* vram_offset should always be 0 */
+	msg.situ.video_output[0].vram_offset = 0;
+	msg.situ.video_output[0].depth_bits = bpp;
+	msg.situ.video_output[0].width_pixels = w;
+	msg.situ.video_output[0].height_pixels = h;
+	msg.situ.video_output[0].pitch_bytes = pitch;
+
+	hyperv_sendpacket(hdev, &msg);
+
+	return 0;
+}
+
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg msg;
+
+	if (!hv->dirt_needed)
+		return 0;
+
+	memset(&msg, 0, sizeof(struct synthvid_msg));
+
+	msg.vid_hdr.type = SYNTHVID_DIRT;
+	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_dirt);
+	msg.dirt.video_output = 0;
+	msg.dirt.dirt_count = 1;
+	msg.dirt.rect[0].x1 = rect->x1;
+	msg.dirt.rect[0].y1 = rect->y1;
+	msg.dirt.rect[0].x2 = rect->x2;
+	msg.dirt.rect[0].y2 = rect->y2;
+
+	hyperv_sendpacket(hdev, &msg);
+
+	return 0;
+}
+
+static int hyperv_get_supported_resolution(struct hv_device *hdev)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+	struct drm_device *dev = &hv->dev;
+	unsigned long t;
+	u8 index;
+	int i;
+
+	memset(msg, 0, sizeof(struct synthvid_msg));
+	msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+		sizeof(struct synthvid_supported_resolution_req);
+	msg->resolution_req.maximum_resolution_count =
+		SYNTHVID_MAX_RESOLUTION_COUNT;
+	hyperv_sendpacket(hdev, msg);
+
+	t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+	if (!t) {
+		drm_err(dev, "Time out on waiting resolution response\n");
+		return -ETIMEDOUT;
+	}
+
+	if (msg->resolution_resp.resolution_count == 0) {
+		drm_err(dev, "No supported resolutions\n");
+		return -ENODEV;
+	}
+
+	index = msg->resolution_resp.default_resolution_index;
+	if (index >= msg->resolution_resp.resolution_count) {
+		drm_err(dev, "Invalid resolution index: %d\n", index);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+		hv->screen_width_max = max_t(u32, hv->screen_width_max,
+			msg->resolution_resp.supported_resolution[i].width);
+		hv->screen_height_max = max_t(u32, hv->screen_height_max,
+			msg->resolution_resp.supported_resolution[i].height);
+	}
+
+	hv->preferred_width =
+		msg->resolution_resp.supported_resolution[index].width;
+	hv->preferred_height =
+		msg->resolution_resp.supported_resolution[index].height;
+
+	return 0;
+}
+
+static void hyperv_receive_sub(struct hv_device *hdev)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg *msg;
+
+	if (!hv)
+		return;
+
+	msg = (struct synthvid_msg *)hv->recv_buf;
+
+	/* Complete the wait event */
+	if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
+	    msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
+	    msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
+		memcpy(hv->init_buf, msg, VMBUS_MAX_PACKET_SIZE);
+		complete(&hv->wait);
+		return;
+	}
+
+	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+		hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+}
+
+static void hyperv_receive(void *ctx)
+{
+	struct hv_device *hdev = ctx;
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct synthvid_msg *recv_buf;
+	u32 bytes_recvd;
+	u64 req_id;
+	int ret;
+
+	if (!hv)
+		return;
+
+	recv_buf = (struct synthvid_msg *)hv->recv_buf;
+
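+	/* Drain the channel: keep reading packets until the ring buffer is empty. */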
+	do {
+		ret = vmbus_recvpacket(hdev->channel, recv_buf,
+				       VMBUS_MAX_PACKET_SIZE,
+				       &bytes_recvd, &req_id);
+		if (bytes_recvd > 0 &&
+		    recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
+			hyperv_receive_sub(hdev);
+	} while (bytes_recvd > 0 && ret == 0);
+}
+
+int hyperv_connect_vsp(struct hv_device *hdev)
+{
+	struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+	struct drm_device *dev = &hv->dev;
+	int ret;
+
+	ret = vmbus_open(hdev->channel, VMBUS_RING_BUFSIZE, VMBUS_RING_BUFSIZE,
+			 NULL, 0, hyperv_receive, hdev);
+	if (ret) {
+		drm_err(dev, "Unable to open vmbus channel\n");
+		return ret;
+	}
+
+	/*
+	 * Negotiate the protocol version with the host: start from the
+	 * newest version the VMBus protocol allows and fall through to
+	 * older ones until the host accepts.
+	 */
+	switch (vmbus_proto_version) {
+	case VERSION_WIN10:
+	case VERSION_WIN10_V5:
+		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+		if (!ret)
+			break;
+		fallthrough;
+	case VERSION_WIN8:
+	case VERSION_WIN8_1:
+		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
+		if (!ret)
+			break;
+		fallthrough;
+	case VERSION_WS2008:
+	case VERSION_WIN7:
+		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
+		break;
+	default:
+		ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+		break;
+	}
+
+	if (ret) {
+		drm_err(dev, "Synthetic video device version not accepted %d\n", ret);
+		goto error;
+	}
+
+	if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
+		hv->screen_depth = SYNTHVID_DEPTH_WIN7;
+	else
+		hv->screen_depth = SYNTHVID_DEPTH_WIN8;
+
+	if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
+		ret = hyperv_get_supported_resolution(hdev);
+		if (ret)
+			drm_err(dev, "Failed to get supported resolution from host, use default\n");
+	} else {
+		hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
+		hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
+	}
+
+	hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
+
+	return 0;
+
+error:
+	vmbus_close(hdev->channel);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 422b59ebf6dce..5f0b85a102159 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11040,7 +11040,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		if (ret < 0)
 			goto unpin_fb;
 
-		fence = dma_resv_get_excl_rcu(obj->base.resv);
+		fence = dma_resv_get_excl_unlocked(obj->base.resv);
 		if (fence) {
 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629f..7df91b7e4ca84 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
 void dma_resv_prune(struct dma_resv *resv)
 {
 	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled_rcu(resv, true))
+		if (dma_resv_test_signaled(resv, true))
 			dma_resv_add_excl_fence(resv, NULL);
 		dma_resv_unlock(resv);
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c10..6234e17259c1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
@@ -113,11 +113,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	seq = raw_read_seqcount(&obj->base.resv->seq);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy =
-		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->base.resv->fence);
+	list = dma_resv_shared_list(obj->base.resv);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99b..66789111a24bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;
 
-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }
 
 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2ebd79537aea9..7c0eb425cb3b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = dma_resv_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_unlocked(obj->base.resv);
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec7606..b5cbbe659a77f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 		return true;
 
 	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(obj->base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4b9856d5ba14f..1e97520c62b23 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 		 */
 		prune_fences = count && timeout >= 0;
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (excl && timeout >= 0)
@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-					      &excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;
 
@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 970d8f4986bbe..90185ca1b7634 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(obj->base.resv,
-							&excl, &count, &shared);
+		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+					  &shared);
 		if (ret)
 			return ret;
 
@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_unlocked(obj->base.resv);
 	}
 
 	if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2744558f30507..c589a681da77e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 		if (ret)
 			return ret;
 
@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = dma_resv_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_unlocked(resv);
 	}
 
 	if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 63f411ab393b7..6f4c80bbc0eb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -30,9 +30,8 @@ struct mtk_disp_color_data {
 	unsigned int color_offset;
 };
 
-/**
+/*
  * struct mtk_disp_color - DISP_COLOR driver structure
- * @ddp_comp: structure containing type enum and hardware resources
  * @crtc: associated crtc to report irq events to
  * @data: platform colour driver data
  */
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index 3ebf91e0ab412..3a5815ab40795 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -29,10 +29,8 @@ struct mtk_disp_gamma_data {
 	bool has_dither;
 };
 
-/**
+/*
  * struct mtk_disp_gamma - DISP_GAMMA driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
  */
 struct mtk_disp_gamma {
 	struct clk *clk;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 961f87f8d4d15..fa9d79963cd34 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -66,9 +66,8 @@ struct mtk_disp_ovl_data {
 	bool smi_id_en;
 };
 
-/**
+/*
  * struct mtk_disp_ovl - DISP_OVL driver structure
- * @ddp_comp: structure containing type enum and hardware resources
  * @crtc: associated crtc to report vblank events to
  * @data: platform data
  */
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 728aaadfea8cf..705f28ceb4ddd 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -55,10 +55,8 @@ struct mtk_disp_rdma_data {
 	unsigned int fifo_size;
 };
 
-/**
+/*
  * struct mtk_disp_rdma - DISP_RDMA driver structure
- * @ddp_comp: structure containing type enum and hardware resources
- * @crtc: associated crtc to report irq events to
  * @data: local driver data
  */
 struct mtk_disp_rdma {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 10f693ea89d35..52536e7adb95b 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -7,6 +7,8 @@ config DRM_MSM
 	depends on IOMMU_SUPPORT
 	depends on OF && COMMON_CLK
 	depends on QCOM_OCMEM || QCOM_OCMEM=n
+	depends on QCOM_LLCC || QCOM_LLCC=n
+	depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
 	select IOMMU_IO_PGTABLE
 	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
@@ -15,7 +17,6 @@ config DRM_MSM
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM if ARCH_QCOM
-	select QCOM_COMMAND_DB if ARCH_QCOM
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f7400..a94a43de95efc 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -817,9 +817,9 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = dma_resv_get_list(obj->resv);
+	fobj = dma_resv_shared_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_get_excl(obj->resv);
+		fence = dma_resv_excl_fence(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
-						  true,  remain);
+	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)
@@ -1025,7 +1024,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 	}
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
@@ -1035,7 +1034,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 		}
 	}
 
-	fence = rcu_dereference(robj->fence_excl);
+	fence = dma_resv_excl_fence(robj);
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3e..8d048bacd6f02 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 			asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
 	asyw->image.offset[0] = nvbo->offset;
 
 	if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0a9334deffe20..b45ec3086285a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 					NOUVEAU_GEM_DOMAIN_GART;
 	else
-	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c390f24f25f3a..520b1ea9d16c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 	if (nvbo->bo.pin_count) {
 		bool error = evict;
 
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 			break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				 bo->mem.mem_type, domain);
+				 bo->resource->mem_type, domain);
 			ret = -EBUSY;
 		}
 		ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
 	ttm_bo_pin(&nvbo->bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		drm->gem.vram_available -= bo->base.size;
 		break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	ttm_bo_unpin(&nvbo->bo);
 	if (!nvbo->bo.pin_count) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			drm->gem.vram_available += bo->base.size;
 			break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
 
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 					 NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_resource *reg)
 {
-	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
 	struct nouveau_mem *new_mem = nouveau_mem(reg);
 	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
 		if (ret == 0) {
 			ret = nouveau_fence_new(chan, false, &fence);
 			if (ret == 0) {
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 
 }
 
@@ -955,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+	struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -969,7 +965,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_resource *old_reg = &bo->mem;
+	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
@@ -1009,7 +1005,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_reg->mem_type == TTM_PL_TT &&
 	    new_reg->mem_type == TTM_PL_SYSTEM) {
 		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_reg);
 		goto out;
 	}
@@ -1045,7 +1041,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	}
 out_ntfy:
 	if (ret) {
-		nouveau_bo_move_ntfy(bo, &bo->mem);
+		nouveau_bo_move_ntfy(bo, bo->resource);
 	}
 	return ret;
 }
@@ -1170,7 +1166,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
 			list_del_init(&nvbo->io_reserve_lru);
 			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 					   bdev->dev_mapping);
-			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
 			goto retry;
 		}
 
@@ -1200,12 +1196,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
-	if (bo->mem.mem_type != TTM_PL_VRAM) {
+	if (bo->resource->mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 		    !nvbo->kind)
 			return 0;
 
-		if (bo->mem.mem_type != TTM_PL_SYSTEM)
+		if (bo->resource->mem_type != TTM_PL_SYSTEM)
 			return 0;
 
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1213,7 +1209,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	} else {
 		/* make sure bo is in mappable vram */
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-		    bo->mem.start + bo->mem.num_pages < mappable)
+		    bo->resource->start + bo->resource->num_pages < mappable)
 			return 0;
 
 		for (i = 0; i < nvbo->placement.num_placement; ++i) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7cfac265fd452..40362600eed26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		args.start = 0;
 		args.limit = chan->vmm->vmm.limit - 1;
 	} else
-	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+	if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
 			/* nv04 vram pushbuf hack, retarget to its location in
 			 * the framebuffer bar rather than direct vram access..
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 93ac78bda750e..4f9b3aa5deda9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = nvbo->bo.mem.bus.offset;
+	info->fix.smem_start = nvbo->bo.resource->bus.offset;
 	info->fix.smem_len = nvbo->bo.base.size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7ef..6b43918035df5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			return ret;
 	}
 
-	fobj = dma_resv_get_list(resv);
-	fence = dma_resv_get_excl(resv);
+	fobj = dma_resv_shared_list(resv);
+	fence = dma_resv_excl_fence(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 722e1decc2029..5b27845075a1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-	    bo->mem.mem_type == TTM_PL_VRAM)
+	    bo->resource->mem_type == TTM_PL_VRAM)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		 bo->mem.mem_type == TTM_PL_TT)
+		 bo->resource->mem_type == TTM_PL_TT)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 			if (nvbo->offset == b->presumed.offset &&
-			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 				continue;
 
-			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 			else
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		}
 
 		if (!nvbo->kmap.virtual) {
-			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 					  &nvbo->kmap);
 			if (ret) {
 				NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -870,7 +870,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
 					ret = ttm_bo_kmap(&nvbo->bo, 0,
-							  nvbo->bo.mem.
+							  nvbo->bo.resource->
 							  num_pages,
 							  &nvbo->kmap);
 					if (ret) {
@@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
-						   no_wait ? 0 : 30 * HZ);
+	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+				     no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
 	else if (lret > 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee12..0de6549fb875c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
+
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
 
-	reg->mm_node = mem;
+	*res = &mem->base;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 7df3848e85aae..2c01166a90f25 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,13 +6,8 @@ struct ttm_tt;
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
+	struct ttm_resource base;
 	struct nouveau_cli *cli;
 	u8 kind;
 	u8 comp;
@@ -20,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 65430912ff727..f4c2e46b6fe19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -26,6 +26,8 @@
 #include <linux/limits.h>
 #include <linux/swiotlb.h>
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
@@ -43,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -52,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ttm_resource_init(bo, place, *res);
+
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
@@ -74,17 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
 
@@ -97,26 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a49e88129c922..67d6619fcd5ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -77,7 +77,7 @@ int
 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 		struct nouveau_vma **pvma)
 {
-	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
 	struct nouveau_vma *vma;
 	struct nvif_vma tmp;
 	int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
-	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
 				   mem->mem.size, &tmp);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index b1cd8d7dd87d4..07c2e0878c244 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -77,8 +77,8 @@ static int
 nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 1625826505f69..ea1e1f480bfeb 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_resource *reg = &priv->bo->bo.mem;
+	struct ttm_resource *reg = priv->bo->bo.resource;
 	u32 start = reg->start * PAGE_SIZE;
 	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index 58db83ebadc5f..a96084b34a788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
 		nvkm_wr32(device, 0xb81610, 0x6);
 }
 
-void
+static void
 tu102_mc_intr_unarm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);
@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_rearm(struct nvkm_mc *base)
 {
 	struct tu102_mc *mc = tu102_mc(base);
@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
-void
+static void
 tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
 {
 	struct tu102_mc *mc = tu102_mc(base);
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index fe5ac3ef90185..4787f0833264a 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -42,6 +42,7 @@ struct kd35t133 {
 	struct gpio_desc *reset_gpio;
 	struct regulator *vdd;
 	struct regulator *iovcc;
+	enum drm_panel_orientation orientation;
 	bool prepared;
 };
 
@@ -216,6 +217,7 @@ static int kd35t133_get_modes(struct drm_panel *panel,
 	connector->display_info.width_mm = mode->width_mm;
 	connector->display_info.height_mm = mode->height_mm;
 	drm_mode_probed_add(connector, mode);
+	drm_connector_set_panel_orientation(connector, ctx->orientation);
 
 	return 1;
 }
@@ -258,6 +260,12 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
 		return ret;
 	}
 
+	ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+	if (ret < 0) {
+		dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+		return ret;
+	}
+
 	mipi_dsi_set_drvdata(dsi, ctx);
 
 	ctx->dev = dev;
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 5e9ccefb88f62..2229f1af2ca8c 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -29,7 +29,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-/**
+/*
  * Raspberry Pi 7" touchscreen panel driver.
  *
  * The 7" touchscreen consists of a DPI LCD panel, a Toshiba
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 4d2a149b202cb..320a2a8fd4592 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -38,7 +38,7 @@
 #define DSI_CMD2_BK1_SPD2		0xC2 /* Source EQ2 Setting */
 #define DSI_CMD2_BK1_MIPISET1		0xD0 /* MIPI Setting 1 */
 
-/**
+/*
  * Command2 with BK function selection.
  *
  * BIT[4, 0]: [CN2, BKXSEL]
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 597cf1459b0a8..f614e98771e49 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -45,6 +45,7 @@ struct panfrost_features {
 	u32 thread_max_workgroup_sz;
 	u32 thread_max_barrier_sz;
 	u32 coherency_features;
+	u32 afbc_features;
 	u32 texture_features[4];
 	u32 js_features[16];
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ca07098a61419..075ec0ef746cf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,6 +63,7 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
 				thread_max_barrier_sz);
 		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
 		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
 		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
 		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
@@ -311,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
-						  true, timeout);
+	ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
@@ -547,6 +547,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
  * Panfrost driver version:
  * - 1.0 - initial interface
  * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ * - 1.2 - adds AFBC_FEATURES query
  */
 static const struct drm_driver panfrost_drm_driver = {
 	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -559,7 +560,7 @@ static const struct drm_driver panfrost_drm_driver = {
 	.desc			= "panfrost DRM",
 	.date			= "20180908",
 	.major			= 1,
-	.minor			= 1,
+	.minor			= 2,
 
 	.gem_create_object	= panfrost_gem_create_object,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 2aae636f1cf5c..0e70e27fd8c36 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -228,6 +228,7 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
 	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
 	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
 	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
 	for (i = 0; i < 4; i++)
 		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb13221..2df3e999a38d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
+		implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index eddaa62ad8b0e..dc9df5457f1c3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -82,6 +82,7 @@
 
 #define GPU_TEXTURE_FEATURES(n)		(0x0B0 + ((n) * 4))
 #define GPU_JS_FEATURES(n)		(0x0C0 + ((n) * 4))
+#define GPU_AFBC_FEATURES		(0x4C)	/* (RO) AFBC support on Bifrost */
 
 #define GPU_SHADER_PRESENT_LO		0x100	/* (RO) Shader core present bitmap, low word */
 #define GPU_SHADER_PRESENT_HI		0x104	/* (RO) Shader core present bitmap, high word */
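
For context, userspace reaches the new AFBC register through the existing
get_param ioctl. A hedged sketch, assuming the DRM_PANFROST_PARAM_AFBC_FEATURES
UAPI enum implied by the PANFROST_FEATURE(AFBC_FEATURES, ...) expansion above:

#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Query the AFBC feature register exposed by driver interface 1.2.
 * fd is an open panfrost render node; returns 0 on success.
 */
static int panfrost_query_afbc(int fd, __u64 *afbc_features)
{
	struct drm_panfrost_get_param param = {
		.param = DRM_PANFROST_PARAM_AFBC_FEATURES,
	};

	if (ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &param))
		return -1;

	*afbc_features = param.value;
	return 0;
}
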
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 80f6748055e36..3aae387a96af2 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -3,6 +3,7 @@ config DRM_PL111
 	tristate "DRM Support for PL111 CLCD Controller"
 	depends on DRM
 	depends on ARM || ARM64 || COMPILE_TEST
+	depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
 	depends on COMMON_CLK
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 183d15e2cf588..1f9a59601bb15 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		int rel;
 
 		rcu_read_lock();
-		fobj = rcu_dereference(bo->tbo.base.resv->fence);
+		fobj = dma_resv_shared_list(bo->tbo.base.resv);
 		rel = fobj ? fobj->shared_count : 0;
 		rcu_read_unlock();
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 20a0f3ab84ad3..dd6abee55f56c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -292,12 +292,12 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
 			unsigned long offset)
 {
 	struct qxl_memslot *slot =
-		(bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		(bo->tbo.resource->mem_type == TTM_PL_VRAM)
 		? &qdev->main_slot : &qdev->surfaces_slot;
 
-       /* TODO - need to hold one of the locks to read bo->tbo.mem.start */
+       /* TODO - need to hold one of the locks to read bo->tbo.resource->start */
 
-	return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);
+	return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
 }
 
 /* qxl_display.c */
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index a635d9fdf8ac3..d636ba6854513 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 	surf.height = args->height;
 	surf.stride = pitch;
 	surf.format = format;
+	surf.data = 0;
+
 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
 					      QXL_GEM_DOMAIN_CPU,
 					      args->size, &surf, &qobj,
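
The new surf.data = 0 plugs a field that was previously handed to
qxl_gem_object_create_with_handle() with uninitialized stack contents. An
equivalent pattern is zero-initializing the whole struct at its declaration;
a sketch only, assuming the qxl_surface field set visible in the hunk above:

#include "qxl_dev.h"

/* Build a fully-initialized surface descriptor for a dumb buffer;
 * the designated initializer zeroes .data and any remaining fields.
 */
static struct qxl_surface qxl_dumb_surface(u32 width, u32 height,
					   u32 pitch, u32 format)
{
	struct qxl_surface surf = {
		.width = width,
		.height = height,
		.stride = pitch,
		.format = format,
		/* .data is left zero-initialized */
	};

	return surf;
}
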
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6e26d70f2f07a..fbb36e3e8564b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -212,14 +212,14 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 	struct io_mapping *map;
 	struct dma_buf_map bo_map;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
 
-	offset = bo->tbo.mem.start << PAGE_SHIFT;
+	offset = bo->tbo.resource->start << PAGE_SHIFT;
 	return io_mapping_map_atomic_wc(map, offset + page_offset);
 fallback:
 	if (bo->kptr) {
@@ -266,8 +266,8 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 			       struct qxl_bo *bo, void *pmap)
 {
-	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
-	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
+	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
 		goto fallback;
 
 	io_mapping_unmap_atomic(pmap);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 47afe95d04a1a..19fd39d9a00cb 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
@@ -131,7 +132,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 	qbo = to_qxl_bo(bo);
 	qdev = to_qxl(qbo->tbo.base.dev);
 
-	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+	if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -140,7 +141,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 		       struct ttm_resource *new_mem,
 		       struct ttm_place *hop)
 {
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	int ret;
 
 	qxl_bo_move_notify(bo, new_mem);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80a3bee933d6d..9ed2b2700e0a5 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
-	return (int)la->robj->tbo.mem.num_pages -
-		(int)lb->robj->tbo.mem.num_pages;
+	return (int)la->robj->tbo.resource->num_pages -
+		(int)lb->robj->tbo.resource->num_pages;
 }
 
 /**
@@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	}
 
 	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
-				&rdev->ring_tmp_bo.bo->tbo.mem);
+				rdev->ring_tmp_bo.bo->tbo.resource);
 	if (r)
 		return r;
 
@@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+		r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
 		if (r)
 			return r;
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd0..4066813174194 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+	work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ff8849827d615..458f92a708879 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -523,13 +523,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
 		r = 0;
 
-	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_put(gobj);
 	return r;
@@ -552,14 +552,14 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
 		r = ret;
 
 	/* Flush HDP cache via MMIO if necessary */
-	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
@@ -643,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 		goto error_free;
 
 	list_for_each_entry(entry, &list, head) {
-		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
 		/* if anything is swapped out don't swap it in here,
 		   just abort and wait for the next CS */
 		if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -656,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 		goto error_unlock;
 
 	if (bo_va->it.start)
-		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
 
 error_unlock:
 	mutex_unlock(&bo_va->vm->mutex);
@@ -860,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
 		unsigned domain;
 		const char *placement;
 
-		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
 		switch (domain) {
 		case RADEON_GEM_DOMAIN_VRAM:
 			placement = "VRAM";
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c36..9fa88549c89e7 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 		return true;
 	}
 
-	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+				  MAX_SCHEDULE_TIMEOUT);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index cee11c55fd156..bfaaa3c969a38 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
 
-	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+	radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
 
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
 {
 	ttm_bo_unpin(&bo->tbo);
 	if (!bo->tbo.pin_count) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
 		else
 			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			u32 domain = lobj->preferred_domains;
 			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
-				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
 
 			/* Check if this buffer will be moved and don't move it
 			 * if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.start << PAGE_SHIFT,
+			       bo->tbo.resource->start << PAGE_SHIFT,
 			       bo->tbo.base.size);
 	return 0;
 }
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 		return 0;
 	}
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	if (!new_mem)
 		return;
 
-	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+	radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
 	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	size = bo->mem.num_pages << PAGE_SHIFT;
-	offset = bo->mem.start << PAGE_SHIFT;
+	size = bo->resource->num_pages << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + size) <= rdev->mc.visible_vram_size)
 		return 0;
 
@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	} else if (likely(!r)) {
-		offset = bo->mem.start << PAGE_SHIFT;
+		offset = bo->resource->start << PAGE_SHIFT;
 		/* this should never happen */
 		if ((offset + size) > rdev->mc.visible_vram_size)
 			return VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index fd4116bdde0f5..1739c6a142cdd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 
 	rdev = radeon_get_rdev(bo->tbo.bdev);
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		start = rdev->mc.gtt_start;
 		break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 		break;
 	}
 
-	return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+	return (bo->tbo.resource->start << PAGE_SHIFT) + start;
 }
 
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3861c0b98fcf3..c67b6ddb29a47 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 		return;
 
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 			ttm_bo_unmap_virtual(&bo->tbo);
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076b..9257b60144c4d 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,14 +98,14 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	int r = 0;
 
 	/* always sync to the exclusive fence */
-	f = dma_resv_get_excl(resv);
+	f = dma_resv_excl_fence(resv);
 	fence = f ? to_radeon_fence(f) : NULL;
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (shared || !flist || r)
 		return r;
 
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 1729cb9a95c55..c9fed5f2b8706 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.mem.num_pages;
+			   __entry->pages = bo->tbo.resource->num_pages;
 			   ),
 	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a71d94f7067bb..ad2a5a791bbaf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,6 +45,7 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -98,12 +99,12 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 		return;
 	}
 	rbo = container_of(bo, struct radeon_bo, tbo);
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
-			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+			 bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
 			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
 			int i;
 
@@ -195,9 +196,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 			  struct ttm_resource *new_mem,
 			  struct ttm_place *hop)
 {
+	struct ttm_resource *old_mem = bo->resource;
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
-	struct ttm_resource *old_mem = &bo->mem;
 	int r;
 
 	if (new_mem->mem_type == TTM_PL_TT) {
@@ -229,7 +230,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_mem->mem_type == TTM_PL_TT &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da2..1f5b1a5c0a092 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	f = dma_resv_get_excl(bo->tbo.base.resv);
+	f = dma_resv_excl_fence(bo->tbo.base.resv);
 	if (f) {
 		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
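
The dma-resv changes here and in the surrounding qxl, radeon and ttm hunks are
pure renames: dma_resv_get_excl() becomes dma_resv_excl_fence(),
dma_resv_get_list() becomes dma_resv_shared_list(), dma_resv_get_excl_rcu()
becomes dma_resv_get_excl_unlocked(), and the _rcu wait/test helpers simply
drop the suffix. Semantics are unchanged, as in this minimal sketch of the
30-second idle wait the radeon ioctls perform:

#include <linux/dma-resv.h>

/* Wait for all fences (shared and exclusive), interruptibly, with a
 * 30s timeout -- the same call radeon_gem_wait_idle_ioctl() makes
 * after the rename.
 */
static long wait_bo_idle(struct dma_resv *resv)
{
	return dma_resv_wait_timeout(resv, true, true, 30 * HZ);
}
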
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 5c2b650b561d5..03f3377f918c0 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -272,7 +272,7 @@ static void hda_write(struct sti_hda *hda, u32 val, int offset)
 }
 
 /**
- * Search for a video mode in the supported modes table
+ * hda_get_mode_idx - Search for a video mode in the supported modes table
  *
  * @mode: mode being searched
  * @idx: index of the found mode
@@ -292,7 +292,7 @@ static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
 }
 
 /**
- * Enable the HD DACS
+ * hda_enable_hd_dacs - Enable the HD DACS
  *
  * @hda: pointer to HD analog structure
  * @enable: true if HD DACS need to be enabled, else false
@@ -380,7 +380,7 @@ static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
 }
 
 /**
- * Configure AWG, writing instructions
+ * sti_hda_configure_awg - Configure AWG, writing instructions
  *
  * @hda: pointer to HD analog structure
  * @awg_instr: pointer to AWG instructions table
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
index d5f94dca0d323..d25ecd4f4b673 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -67,7 +67,7 @@ static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
 };
 
 /**
- * Start hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_start - Start hdmi phy macro cell tx3g4c28
  *
  * @hdmi: pointer on the hdmi internal structure
  *
@@ -179,7 +179,7 @@ static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
 }
 
 /**
- * Stop hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_stop - Stop hdmi phy macro cell tx3g4c28
  *
  * @hdmi: pointer on the hdmi internal structure
  */
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index edbb99f53de19..d09b08995b12a 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -782,7 +782,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
 }
 
 /**
- * sti_vdp_vtg_cb
+ * sti_hqvdp_vtg_cb
  * @nb: notifier block
  * @evt: event message
  * @data: private data
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index df3817f0fd302..2499715a69b79 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -153,7 +153,7 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
 }
 
 /**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_color_order - Set the color order of a VIP
  *
  * @tvout: tvout structure
  * @reg: register to set
@@ -177,7 +177,7 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
 }
 
 /**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_clip_mode - Set the clipping mode of a VIP
  *
  * @tvout: tvout structure
  * @reg: register to set
@@ -193,7 +193,7 @@ static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
 }
 
 /**
- * Set the rounded value of a VIP
+ * tvout_vip_set_rnd - Set the rounded value of a VIP
  *
  * @tvout: tvout structure
  * @reg: register to set
@@ -209,7 +209,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
 }
 
 /**
- * Select the VIP input
+ * tvout_vip_set_sel_input - Select the VIP input
  *
  * @tvout: tvout structure
  * @reg: register to set
@@ -247,7 +247,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
 }
 
 /**
- * Select the input video signed or unsigned
+ * tvout_vip_set_in_vid_fmt - Select whether the input video is signed or unsigned
  *
  * @tvout: tvout structure
  * @reg: register to set
@@ -264,7 +264,7 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
 }
 
 /**
- * Set preformatter matrix
+ * tvout_preformatter_set_matrix - Set preformatter matrix
  *
  * @tvout: tvout structure
  * @mode: display mode structure
@@ -289,7 +289,7 @@ static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
 }
 
 /**
- * Start VIP block for DVO output
+ * tvout_dvo_start - Start VIP block for DVO output
  *
  * @tvout: pointer on tvout structure
  * @main_path: true if main path has to be used in the vip configuration
@@ -343,7 +343,7 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
 }
 
 /**
- * Start VIP block for HDMI output
+ * tvout_hdmi_start - Start VIP block for HDMI output
  *
  * @tvout: pointer on tvout structure
  * @main_path: true if main path has to be used in the vip configuration
@@ -392,7 +392,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
 }
 
 /**
- * Start HDF VIP and HD DAC
+ * tvout_hda_start - Start HDF VIP and HD DAC
  *
  * @tvout: pointer on tvout structure
  * @main_path: true if main path has to be used in the vip configuration
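
All of the sti hunks above cure the same W=1 kernel-doc warning: a comment
opened with /** is parsed by scripts/kernel-doc, which only matches the block
to its function when the first line names it. The expected shape is:

/**
 * function_name - Short one-line description
 * @arg: description of the argument
 *
 * Optional longer description.
 */

Conversely, comments that are not kernel-doc should open with a plain /*,
which is what the panel-raspberrypi-touchscreen and panel-sitronix-st7701
hunks earlier in this series switch to.
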
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index e99771b947b68..08b71248044de 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -531,7 +531,6 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	struct drm_encoder *encoder = NULL;
 	struct drm_bridge *bridge = NULL;
 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
-	struct videomode vm;
 	u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
 	u32 total_width, total_height;
 	u32 bus_flags = 0;
@@ -570,31 +569,33 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		}
 	}
 
-	drm_display_mode_to_videomode(mode, &vm);
-
 	DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
-	DRM_DEBUG_DRIVER("Video mode: %dx%d", vm.hactive, vm.vactive);
+	DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
 	DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
-			 vm.hfront_porch, vm.hback_porch, vm.hsync_len,
-			 vm.vfront_porch, vm.vback_porch, vm.vsync_len);
+			 mode->hsync_start - mode->hdisplay,
+			 mode->htotal - mode->hsync_end,
+			 mode->hsync_end - mode->hsync_start,
+			 mode->vsync_start - mode->vdisplay,
+			 mode->vtotal - mode->vsync_end,
+			 mode->vsync_end - mode->vsync_start);
 
 	/* Convert video timings to ltdc timings */
-	hsync = vm.hsync_len - 1;
-	vsync = vm.vsync_len - 1;
-	accum_hbp = hsync + vm.hback_porch;
-	accum_vbp = vsync + vm.vback_porch;
-	accum_act_w = accum_hbp + vm.hactive;
-	accum_act_h = accum_vbp + vm.vactive;
-	total_width = accum_act_w + vm.hfront_porch;
-	total_height = accum_act_h + vm.vfront_porch;
+	hsync = mode->hsync_end - mode->hsync_start - 1;
+	vsync = mode->vsync_end - mode->vsync_start - 1;
+	accum_hbp = mode->htotal - mode->hsync_start - 1;
+	accum_vbp = mode->vtotal - mode->vsync_start - 1;
+	accum_act_w = accum_hbp + mode->hdisplay;
+	accum_act_h = accum_vbp + mode->vdisplay;
+	total_width = mode->htotal - 1;
+	total_height = mode->vtotal - 1;
 
 	/* Configures the HS, VS, DE and PC polarities. Default Active Low */
 	val = 0;
 
-	if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
 		val |= GCR_HSPOL;
 
-	if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		val |= GCR_VSPOL;
 
 	if (bus_flags & DRM_BUS_FLAG_DE_LOW)
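
The open-coded differences above rely on the standard identities between
struct videomode porches and struct drm_display_mode timings, spelled out here
as hypothetical helpers (horizontal case; vertical is analogous):

#include <drm/drm_modes.h>

/* front porch = hsync_start - hdisplay
 * sync length = hsync_end   - hsync_start
 * back porch  = htotal      - hsync_end
 * Back porch + sync + active + front porch sums to htotal, which is
 * why total_width above collapses to mode->htotal - 1.
 */
static inline u32 mode_hfront_porch(const struct drm_display_mode *m)
{
	return m->hsync_start - m->hdisplay;
}

static inline u32 mode_hback_porch(const struct drm_display_mode *m)
{
	return m->htotal - m->hsync_end;
}
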
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 0db164a774a1f..e779855bcd6e3 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -370,6 +370,11 @@ static const u32 sun8i_ui_layer_formats[] = {
 	DRM_FORMAT_XRGB8888,
 };
 
+static const uint64_t sun8i_layer_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
 struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
 					       struct sun8i_mixer *mixer,
 					       int index)
@@ -392,7 +397,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
 				       &sun8i_ui_layer_funcs,
 				       sun8i_ui_layer_formats,
 				       ARRAY_SIZE(sun8i_ui_layer_formats),
-				       NULL, type, NULL);
+				       sun8i_layer_modifiers, type, NULL);
 	if (ret) {
 		dev_err(drm->dev, "Couldn't initialize layer\n");
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 46420780db598..1c86c2dd0bbf1 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -534,6 +534,11 @@ static const u32 sun8i_vi_layer_de3_formats[] = {
 	DRM_FORMAT_YVU422,
 };
 
+static const uint64_t sun8i_layer_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
 struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
 					       struct sun8i_mixer *mixer,
 					       int index)
@@ -560,7 +565,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
 	ret = drm_universal_plane_init(drm, &layer->plane, 0,
 				       &sun8i_vi_layer_funcs,
 				       formats, format_count,
-				       NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+				       sun8i_layer_modifiers,
+				       DRM_PLANE_TYPE_OVERLAY, NULL);
 	if (ret) {
 		dev_err(drm->dev, "Couldn't initialize layer\n");
 		return ERR_PTR(ret);
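
Both sun8i hunks make the previously implicit "linear only" support explicit:
drm_universal_plane_init() accepts a DRM_FORMAT_MOD_INVALID-terminated
modifier array in place of the old NULL, and advertises it to userspace
through the plane's IN_FORMATS property. The terminator is what lets the core
find the array length, since no count argument is passed:

#include <drm/drm_fourcc.h>

/* A modifier list passed to drm_universal_plane_init() must end with
 * DRM_FORMAT_MOD_INVALID; there is no separate length parameter.
 */
static const uint64_t linear_only_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
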
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 51a94fd63bd79..db53fecca696a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	int i, mem_type;
 
 	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-		   bo, bo->mem.num_pages, bo->base.size >> 10,
+		   bo, bo->resource->num_pages, bo->base.size >> 10,
 		   bo->base.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		mem_type = placement->placement[i].mem_type;
@@ -109,7 +109,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 		bdev->funcs->del_from_lru_notify(bo);
 
 	if (bulk && !bo->pin_count) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_TT:
 			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 			break;
@@ -163,11 +163,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_operation_ctx *ctx,
 				  struct ttm_place *hop)
 {
+	struct ttm_resource_manager *old_man, *new_man;
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
-	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
+	old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+	new_man = ttm_manager_type(bdev, mem->mem_type);
+
 	ttm_bo_unmap_virtual(bo);
 
 	/*
@@ -200,7 +202,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	return 0;
 
 out_err:
-	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+	new_man = ttm_manager_type(bdev, bo->resource->mem_type);
 	if (!new_man->use_tt)
 		ttm_bo_tt_destroy(bo);
 
@@ -221,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		bo->bdev->funcs->delete_mem_notify(bo);
 
 	ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->resource);
 }
 
 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -259,8 +261,8 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	int i;
 
 	rcu_read_lock();
-	fobj = rcu_dereference(resv->fence);
-	fence = rcu_dereference(resv->fence_excl);
+	fobj = dma_resv_shared_list(resv);
+	fence = dma_resv_excl_fence(resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
@@ -294,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -306,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
-						 30 * HZ);
+		lret = dma_resv_wait_timeout(resv, true, interruptible,
+					     30 * HZ);
 
 		if (lret < 0)
 			return lret;
@@ -409,18 +411,18 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-						  30 * HZ);
+			dma_resv_wait_timeout(bo->base.resv, true, false,
+					      30 * HZ);
 		}
 
 		if (bo->bdev->funcs->release_notify)
 			bo->bdev->funcs->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_free(bdev, &bo->mem);
+		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, true) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -438,7 +440,7 @@ static void ttm_bo_release(struct kref *kref)
 		 */
 		if (bo->pin_count) {
 			bo->pin_count = 0;
-			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+			ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
 		}
 
 		kref_init(&bo->kref);
@@ -487,7 +489,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource evict_mem;
+	struct ttm_resource *evict_mem;
 	struct ttm_placement placement;
 	struct ttm_place hop;
 	int ret = 0;
@@ -501,10 +503,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	bdev->funcs->evict_flags(bo, &placement);
 
 	if (!placement.num_placement && !placement.num_busy_placement) {
-		ttm_bo_wait(bo, false, false);
+		ret = ttm_bo_wait(bo, true, false);
+		if (ret)
+			return ret;
 
-		ttm_bo_cleanup_memtype_use(bo);
-		return ttm_tt_create(bo, false);
+		/*
+		 * Since we've already synced, this frees backing store
+		 * immediately.
+		 */
+		return ttm_bo_pipeline_gutting(bo);
 	}
 
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
@@ -517,7 +524,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 		goto out;
 	}
 
-	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
+	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
 	if (unlikely(ret)) {
 		WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
 		if (ret != -ERESTARTSYS)
@@ -531,11 +538,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 			      const struct ttm_place *place)
 {
+	dma_resv_assert_held(bo->base.resv);
+	if (bo->resource->mem_type == TTM_PL_SYSTEM)
+		return true;
+
 	/* Don't evict this BO if it's outside of the
 	 * requested placement range
 	 */
-	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
-	    (place->lpfn && place->lpfn <= bo->mem.start))
+	if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+	    (place->lpfn && place->lpfn <= bo->resource->start))
 		return false;
 
 	return true;
@@ -553,7 +564,9 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
  * b. Otherwise, trylock it.
  */
 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
-			struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
+					   struct ttm_operation_ctx *ctx,
+					   const struct ttm_place *place,
+					   bool *locked, bool *busy)
 {
 	bool ret = false;
 
@@ -571,6 +584,14 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 			*busy = !ret;
 	}
 
+	if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
+		ret = false;
+		if (*locked) {
+			dma_resv_unlock(bo->base.resv);
+			*locked = false;
+		}
+	}
+
 	return ret;
 }
 
@@ -625,20 +646,14 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
 		list_for_each_entry(bo, &man->lru[i], lru) {
 			bool busy;
 
-			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
-							    &busy)) {
+			if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
+							    &locked, &busy)) {
 				if (busy && !busy_bo && ticket !=
 				    dma_resv_locking_ctx(bo->base.resv))
 					busy_bo = bo;
 				continue;
 			}
 
-			if (place && !bdev->funcs->eviction_valuable(bo,
-								      place)) {
-				if (locked)
-					dma_resv_unlock(bo->base.resv);
-				continue;
-			}
 			if (!ttm_bo_get_unless_zero(bo)) {
 				if (locked)
 					dma_resv_unlock(bo->base.resv);
@@ -726,14 +741,15 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem,
+				  struct ttm_resource **mem,
 				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+	struct ttm_resource_manager *man;
 	struct ww_acquire_ctx *ticket;
 	int ret;
 
+	man = ttm_manager_type(bdev, place->mem_type);
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = ttm_resource_alloc(bo, place, mem);
@@ -747,37 +763,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
-				const struct ttm_place *place,
-				struct ttm_resource *mem)
-{
-	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
-
-	man = ttm_manager_type(bdev, place->mem_type);
-	if (!man || !ttm_resource_manager_used(man))
-		return -EBUSY;
-
-	mem->mem_type = place->mem_type;
-	mem->placement = place->flags;
-
-	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_move_to_lru_tail(bo, mem, NULL);
-	spin_unlock(&bo->bdev->lru_lock);
-	return 0;
+	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 }
 
 /*
@@ -790,7 +776,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			struct ttm_resource *mem,
+			struct ttm_resource **mem,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_device *bdev = bo->bdev;
@@ -805,8 +791,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		const struct ttm_place *place = &placement->placement[i];
 		struct ttm_resource_manager *man;
 
-		ret = ttm_bo_mem_placement(bo, place, mem);
-		if (ret)
+		man = ttm_manager_type(bdev, place->mem_type);
+		if (!man || !ttm_resource_manager_used(man))
 			continue;
 
 		type_found = true;
@@ -816,8 +802,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		man = ttm_manager_type(bdev, mem->mem_type);
-		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			ttm_resource_free(bo, mem);
 			if (ret == -EBUSY)
@@ -830,9 +815,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
+		struct ttm_resource_manager *man;
 
-		ret = ttm_bo_mem_placement(bo, place, mem);
-		if (ret)
+		man = ttm_manager_type(bdev, place->mem_type);
+		if (!man || !ttm_resource_manager_used(man))
 			continue;
 
 		type_found = true;
@@ -851,7 +837,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 error:
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+	if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
 		ttm_bo_move_to_lru_tail_unlocked(bo);
 
 	return ret;
@@ -859,12 +845,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
-				     struct ttm_resource *mem,
+				     struct ttm_resource **mem,
 				     struct ttm_operation_ctx *ctx,
 				     struct ttm_place *hop)
 {
 	struct ttm_placement hop_placement;
-	struct ttm_resource hop_mem;
+	struct ttm_resource *hop_mem;
 	int ret;
 
 	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
@@ -875,7 +861,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 	/* move to the bounce domain */
-	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
 	if (ret) {
 		ttm_resource_free(bo, &hop_mem);
 		return ret;
@@ -887,14 +873,12 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			      struct ttm_placement *placement,
 			      struct ttm_operation_ctx *ctx)
 {
+	struct ttm_resource *mem;
 	struct ttm_place hop;
-	struct ttm_resource mem;
 	int ret;
 
 	dma_resv_assert_held(bo->base.resv);
 
-	memset(&hop, 0, sizeof(hop));
-
 	/*
 	 * Determine where to move the buffer.
 	 *
@@ -908,7 +892,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 bounce:
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
 	if (ret == -EMULTIHOP) {
 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
 		if (ret)
@@ -976,18 +960,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Remove the backing store if no placement is given.
 	 */
-	if (!placement->num_placement && !placement->num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
-
-		return ttm_tt_create(bo, false);
-	}
+	if (!placement->num_placement && !placement->num_busy_placement)
+		return ttm_bo_pipeline_gutting(bo);
 
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+	if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, ctx);
 		if (ret)
 			return ret;
@@ -995,7 +974,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * We might need to add a TTM.
 	 */
-	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+	if (bo->resource->mem_type == TTM_PL_SYSTEM) {
 		ret = ttm_tt_create(bo, true);
 		if (ret)
 			return ret;
@@ -1017,7 +996,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 {
 	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
 	bool locked;
-	int ret = 0;
+	int ret;
 
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
@@ -1027,7 +1006,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->page_alignment = page_alignment;
-	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
 	bo->moving = NULL;
 	bo->pin_count = 0;
 	bo->sg = sg;
@@ -1039,6 +1017,12 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	}
 	atomic_inc(&ttm_glob.bo_count);
 
+	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+	if (unlikely(ret)) {
+		ttm_bo_put(bo);
+		return ret;
+	}
+
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
@@ -1046,7 +1030,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
-					 bo->mem.num_pages);
+					 bo->resource->num_pages);
 
 	/* passed reservation objects should already be locked,
 	 * since otherwise lockdep will be angered in radeon.
@@ -1108,7 +1092,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	struct ttm_device *bdev = bo->bdev;
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free(bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1118,14 +1102,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
-						      interruptible, timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+					timeout);
 	if (timeout < 0)
 		return timeout;
 
@@ -1140,10 +1124,19 @@ EXPORT_SYMBOL(ttm_bo_wait);
 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		   gfp_t gfp_flags)
 {
+	struct ttm_place place;
 	bool locked;
 	int ret;
 
-	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
+	/*
+	 * While the bo may already reside in SYSTEM placement, set
+	 * SYSTEM as new placement to also cover the move further below.
+	 * The driver may use the fact that we're moving from SYSTEM
+	 * as an indication that we're about to swap out.
+	 */
+	memset(&place, 0, sizeof(place));
+	place.mem_type = TTM_PL_SYSTEM;
+	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
 		return -EBUSY;
 
 	if (!ttm_bo_get_unless_zero(bo)) {
@@ -1165,21 +1158,17 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 	/*
 	 * Move to system cached
 	 */
-	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
 		struct ttm_operation_ctx ctx = { false, false };
-		struct ttm_resource evict_mem;
-		struct ttm_place place, hop;
+		struct ttm_resource *evict_mem;
+		struct ttm_place hop;
 
-		memset(&place, 0, sizeof(place));
 		memset(&hop, 0, sizeof(hop));
-
-		place.mem_type = TTM_PL_SYSTEM;
-
 		ret = ttm_resource_alloc(bo, &place, &evict_mem);
 		if (unlikely(ret))
 			goto out;
 
-		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
 		if (unlikely(ret != 0)) {
 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
 			goto out;
@@ -1202,7 +1191,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 	if (bo->bdev->funcs->swap_notify)
 		bo->bdev->funcs->swap_notify(bo);
 
-	ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+	if (ttm_tt_is_populated(bo->ttm))
+		ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
 out:
 
 	/*
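
After this rework, ttm_bo_mem_space() and ttm_bo_mem_force_space() hand back a
driver-allocated struct ttm_resource through a double pointer instead of
filling in a caller-provided struct, matching the bo->resource pointer
conversion above. A hedged caller sketch (hypothetical function, loosely
mirroring ttm_bo_move_buffer()):

#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical caller: the resource object is allocated inside
 * ttm_bo_mem_space(); ownership passes to the BO once the move
 * succeeds, otherwise the caller releases it via its pointer.
 */
static int move_to_placement(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement,
			     struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *new_mem;	/* formerly a by-value struct */
	int ret;

	ret = ttm_bo_mem_space(bo, placement, &new_mem, ctx);
	if (ret)
		return ret;

	ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
	if (ret)
		ttm_resource_free(bo, &new_mem);
	return ret;
}
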
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae8b614607245..2f57f824e6dbd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -31,6 +31,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_cache.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/dma-buf-map.h>
 #include <linux/io.h>
@@ -72,190 +73,112 @@ void ttm_mem_io_free(struct ttm_device *bdev,
 	mem->bus.addr = NULL;
 }
 
-static int ttm_resource_ioremap(struct ttm_device *bdev,
-			       struct ttm_resource *mem,
-			       void **virtual)
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @bo: The struct ttm_buffer_object.
+ * @num_pages: The number of pages to copy.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to be able to run asynchronously under a
+ * dma-fence if desired.
+ */
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+		     u32 num_pages,
+		     struct ttm_kmap_iter *dst_iter,
+		     struct ttm_kmap_iter *src_iter)
 {
-	int ret;
-	void *addr;
-
-	*virtual = NULL;
-	ret = ttm_mem_io_reserve(bdev, mem);
-	if (ret || !mem->bus.is_iomem)
-		return ret;
+	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+	struct ttm_tt *ttm = bo->ttm;
+	struct dma_buf_map src_map, dst_map;
+	pgoff_t i;
 
-	if (mem->bus.addr) {
-		addr = mem->bus.addr;
-	} else {
-		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+	/* Single TTM move. NOP */
+	if (dst_ops->maps_tt && src_ops->maps_tt)
+		return;
 
-		if (mem->bus.caching == ttm_write_combined)
-			addr = ioremap_wc(mem->bus.offset, bus_size);
-#ifdef CONFIG_X86
-		else if (mem->bus.caching == ttm_cached)
-			addr = ioremap_cache(mem->bus.offset, bus_size);
-#endif
-		else
-			addr = ioremap(mem->bus.offset, bus_size);
-		if (!addr) {
-			ttm_mem_io_free(bdev, mem);
-			return -ENOMEM;
+	/* Don't move nonexistent data. Clear destination instead. */
+	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
+		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+			return;
+
+		for (i = 0; i < num_pages; ++i) {
+			dst_ops->map_local(dst_iter, &dst_map, i);
+			if (dst_map.is_iomem)
+				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+			else
+				memset(dst_map.vaddr, 0, PAGE_SIZE);
+			if (dst_ops->unmap_local)
+				dst_ops->unmap_local(dst_iter, &dst_map);
 		}
+		return;
 	}
-	*virtual = addr;
-	return 0;
-}
-
-static void ttm_resource_iounmap(struct ttm_device *bdev,
-				struct ttm_resource *mem,
-				void *virtual)
-{
-	if (virtual && mem->bus.addr == NULL)
-		iounmap(virtual);
-	ttm_mem_io_free(bdev, mem);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
-	uint32_t *dstP =
-	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
-	uint32_t *srcP =
-	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-
-	int i;
-	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
-		iowrite32(ioread32(srcP++), dstP++);
-	return 0;
-}
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page,
-				pgprot_t prot)
-{
-	struct page *d = ttm->pages[page];
-	void *dst;
-
-	if (!d)
-		return -ENOMEM;
-
-	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap_atomic_prot(d, prot);
-	if (!dst)
-		return -ENOMEM;
-
-	memcpy_fromio(dst, src, PAGE_SIZE);
-
-	kunmap_atomic(dst);
-
-	return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page,
-				pgprot_t prot)
-{
-	struct page *s = ttm->pages[page];
-	void *src;
-
-	if (!s)
-		return -ENOMEM;
-
-	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap_atomic_prot(s, prot);
-	if (!src)
-		return -ENOMEM;
 
-	memcpy_toio(dst, src, PAGE_SIZE);
+	for (i = 0; i < num_pages; ++i) {
+		dst_ops->map_local(dst_iter, &dst_map, i);
+		src_ops->map_local(src_iter, &src_map, i);
 
-	kunmap_atomic(src);
+		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
 
-	return 0;
+		if (src_ops->unmap_local)
+			src_ops->unmap_local(src_iter, &src_map);
+		if (dst_ops->unmap_local)
+			dst_ops->unmap_local(dst_iter, &dst_map);
+	}
 }
+EXPORT_SYMBOL(ttm_move_memcpy);
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
-		       struct ttm_resource *new_mem)
+		       struct ttm_resource *dst_mem)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	struct ttm_resource_manager *dst_man =
+		ttm_manager_type(bo->bdev, dst_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_resource *old_mem = &bo->mem;
-	struct ttm_resource old_copy = *old_mem;
-	void *old_iomap;
-	void *new_iomap;
-	int ret;
-	unsigned long i;
-
-	ret = ttm_bo_wait_ctx(bo, ctx);
-	if (ret)
-		return ret;
-
-	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
-	if (ret)
-		return ret;
-	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
-	if (ret)
-		goto out;
-
-	/*
-	 * Single TTM move. NOP.
-	 */
-	if (old_iomap == NULL && new_iomap == NULL)
-		goto out2;
-
-	/*
-	 * Don't move nonexistent data. Clear destination instead.
-	 */
-	if (old_iomap == NULL &&
-	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
-			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
-		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
-		goto out2;
-	}
+	struct ttm_resource *src_mem = bo->resource;
+	struct ttm_resource_manager *src_man =
+		ttm_manager_type(bdev, src_mem->mem_type);
+	struct ttm_resource src_copy = *src_mem;
+	union {
+		struct ttm_kmap_iter_tt tt;
+		struct ttm_kmap_iter_linear_io io;
+	} _dst_iter, _src_iter;
+	struct ttm_kmap_iter *dst_iter, *src_iter;
+	int ret = 0;
 
-	/*
-	 * TTM might be null for moves within the same region.
-	 */
-	if (ttm) {
+	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+		    dst_man->use_tt)) {
 		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
-			goto out1;
+			return ret;
 	}
 
-	for (i = 0; i < new_mem->num_pages; ++i) {
-		if (old_iomap == NULL) {
-			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
-						   prot);
-		} else if (new_iomap == NULL) {
-			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
-						   prot);
-		} else {
-			ret = ttm_copy_io_page(new_iomap, old_iomap, i);
-		}
-		if (ret)
-			goto out1;
+	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+	if (IS_ERR(dst_iter))
+		return PTR_ERR(dst_iter);
+
+	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+	if (IS_ERR(src_iter)) {
+		ret = PTR_ERR(src_iter);
+		goto out_src_iter;
 	}
-	mb();
-out2:
-	old_copy = *old_mem;
 
-	ttm_bo_assign_mem(bo, new_mem);
+	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+	src_copy = *src_mem;
+	ttm_bo_move_sync_cleanup(bo, dst_mem);
 
-	if (!man->use_tt)
-		ttm_bo_tt_destroy(bo);
+	if (!src_iter->ops->maps_tt)
+		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+out_src_iter:
+	if (!dst_iter->ops->maps_tt)
+		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
 
-out1:
-	ttm_resource_iounmap(bdev, old_mem, new_iomap);
-out:
-	ttm_resource_iounmap(bdev, &old_copy, old_iomap);
-
-	/*
-	 * On error, keep the mm node!
-	 */
-	if (!ret)
-		ttm_resource_free(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -336,27 +259,7 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
 	man = ttm_manager_type(bo->bdev, res->mem_type);
 	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
 
-	/* Cached mappings need no adjustment */
-	if (caching == ttm_cached)
-		return tmp;
-
-#if defined(__i386__) || defined(__x86_64__)
-	if (caching == ttm_write_combined)
-		tmp = pgprot_writecombine(tmp);
-	else if (boot_cpu_data.x86 > 3)
-		tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-    defined(__powerpc__) || defined(__mips__)
-	if (caching == ttm_write_combined)
-		tmp = pgprot_writecombine(tmp);
-	else
-		tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__)
-	tmp = pgprot_noncached(tmp);
-#endif
-	return tmp;
+	return ttm_prot_from_caching(caching, tmp);
 }
 EXPORT_SYMBOL(ttm_io_prot);
 
@@ -365,24 +268,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 
-	if (bo->mem.bus.addr) {
+	if (bo->resource->bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
 	} else {
+		resource_size_t res = bo->resource->bus.offset + offset;
+
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->bus.caching == ttm_write_combined)
-			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
-						  size);
+			map->virtual = ioremap_wc(res, size);
 #ifdef CONFIG_X86
 		else if (mem->bus.caching == ttm_cached)
-			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
-						  size);
+			map->virtual = ioremap_cache(res, size);
 #endif
 		else
-			map->virtual = ioremap(bo->mem.bus.offset + offset,
-					       size);
+			map->virtual = ioremap(res, size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -392,7 +294,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false
@@ -438,15 +340,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
 	map->virtual = NULL;
 	map->bo = bo;
-	if (num_pages > bo->mem.num_pages)
+	if (num_pages > bo->resource->num_pages)
 		return -EINVAL;
-	if ((start_page + num_pages) > bo->mem.num_pages)
+	if ((start_page + num_pages) > bo->resource->num_pages)
 		return -EINVAL;
 
-	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
 	if (ret)
 		return ret;
-	if (!bo->mem.bus.is_iomem) {
+	if (!bo->resource->bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
 		offset = start_page << PAGE_SHIFT;
@@ -475,7 +377,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -483,7 +385,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 	int ret;
 
 	ret = ttm_mem_io_reserve(bo->bdev, mem);
@@ -542,7 +444,7 @@ EXPORT_SYMBOL(ttm_bo_vmap);
 
 void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-	struct ttm_resource *mem = &bo->mem;
+	struct ttm_resource *mem = bo->resource;
 
 	if (dma_buf_map_is_null(map))
 		return;
@@ -553,7 +455,7 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		iounmap(map->vaddr_iomem);
 	dma_buf_map_clear(map);
 
-	ttm_mem_io_free(bo->bdev, &bo->mem);
+	ttm_mem_io_free(bo->bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_vunmap);
 
@@ -567,7 +469,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
 
 	if (!dst_use_tt)
 		ttm_bo_tt_destroy(bo);
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->resource);
 	return 0;
 }
 
@@ -605,6 +507,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 		ghost_obj->ttm = NULL;
 	else
 		bo->ttm = NULL;
+	bo->resource = NULL;
 
 	dma_resv_unlock(&ghost_obj->base._resv);
 	ttm_bo_put(ghost_obj);
@@ -615,7 +518,9 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 				       struct dma_fence *fence)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *from;
+
+	from = ttm_manager_type(bdev, bo->resource->mem_type);
 
 	/**
 	 * BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -628,7 +533,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 	}
 	spin_unlock(&from->move_lock);
 
-	ttm_resource_free(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->resource);
 
 	dma_fence_put(bo->moving);
 	bo->moving = dma_fence_get(fence);
@@ -641,7 +546,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct ttm_resource *new_mem)
 {
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
 
@@ -662,26 +567,82 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptibly
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
 	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
 	struct ttm_buffer_object *ghost;
+	struct ttm_resource *sys_res;
+	struct ttm_tt *ttm;
 	int ret;
 
-	ret = ttm_buffer_object_transfer(bo, &ghost);
+	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
 	if (ret)
 		return ret;
 
+	/* If already idle, no need for ghost object dance. */
+	ret = ttm_bo_wait(bo, false, true);
+	if (ret != -EBUSY) {
+		if (!bo->ttm) {
+			/* See comment below about clearing. */
+			ret = ttm_tt_create(bo, true);
+			if (ret)
+				goto error_free_sys_mem;
+		} else {
+			ttm_tt_unpopulate(bo->bdev, bo->ttm);
+			if (bo->type == ttm_bo_type_device)
+				ttm_tt_mark_for_clear(bo->ttm);
+		}
+		ttm_resource_free(bo, &bo->resource);
+		ttm_bo_assign_mem(bo, sys_res);
+		return 0;
+	}
+
+	/*
+	 * We need an unpopulated ttm_tt after giving our current one,
+	 * if any, to the ghost object. And we can't afford to fail
+	 * creating one *after* the operation. If the bo subsequently gets
+	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
+	 * to avoid leaking sensitive information to user-space.
+	 */
+
+	ttm = bo->ttm;
+	bo->ttm = NULL;
+	ret = ttm_tt_create(bo, true);
+	swap(bo->ttm, ttm);
+	if (ret)
+		goto error_free_sys_mem;
+
+	ret = ttm_buffer_object_transfer(bo, &ghost);
+	if (ret)
+		goto error_destroy_tt;
+
 	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
 
-	ttm_resource_alloc(bo, &sys_mem, &bo->mem);
-	bo->ttm = NULL;
-
 	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
-
+	bo->ttm = ttm;
+	bo->resource = NULL;
+	ttm_bo_assign_mem(bo, sys_res);
 	return 0;
+
+error_destroy_tt:
+	ttm_tt_destroy(bo->bdev, ttm);
+
+error_free_sys_mem:
+	ttm_resource_free(bo, &sys_res);
+	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9bd15cb391450..f56be5bc0861e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 	if (bdev->funcs->io_mem_pfn)
 		return bdev->funcs->io_mem_pfn(bo, page_offset);
 
-	return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
+	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
 }
 
 /**
@@ -200,10 +200,10 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 
 	/* Fault should not cross bo boundary. */
 	page_offset &= ~(fault_page_size - 1);
-	if (page_offset + fault_page_size > bo->mem.num_pages)
+	if (page_offset + fault_page_size > bo->resource->num_pages)
 		goto out_fallback;
 
-	if (bo->mem.bus.is_iomem)
+	if (bo->resource->bus.is_iomem)
 		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 	else
 		pfn = page_to_pfn(ttm->pages[page_offset]);
@@ -213,7 +213,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 		goto out_fallback;
 
 	/* Check that memory is contiguous. */
-	if (!bo->mem.bus.is_iomem) {
+	if (!bo->resource->bus.is_iomem) {
 		for (i = 1; i < fault_page_size; ++i) {
 			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
 				goto out_fallback;
@@ -299,7 +299,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	if (unlikely(ret != 0))
 		return ret;
 
-	err = ttm_mem_io_reserve(bdev, &bo->mem);
+	err = ttm_mem_io_reserve(bdev, bo->resource);
 	if (unlikely(err != 0))
 		return VM_FAULT_SIGBUS;
 
@@ -308,11 +308,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->mem.num_pages))
+	if (unlikely(page_offset >= bo->resource->num_pages))
 		return VM_FAULT_SIGBUS;
 
-	prot = ttm_io_prot(bo, &bo->mem, prot);
-	if (!bo->mem.bus.is_iomem) {
+	prot = ttm_io_prot(bo, bo->resource, prot);
+	if (!bo->resource->bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -337,7 +337,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	 * first page.
 	 */
 	for (i = 0; i < num_prefault; ++i) {
-		if (bo->mem.bus.is_iomem) {
+		if (bo->resource->bus.is_iomem) {
 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 		} else {
 			page = ttm->pages[page_offset];
@@ -359,12 +359,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		 * at arbitrary times while the data is mmap'ed.
 		 * See vmf_insert_mixed_prot() for a discussion.
 		 */
-		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed_prot(vma, address,
-						    __pfn_to_pfn_t(pfn, PFN_DEV),
-						    prot);
-		else
-			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -411,15 +406,9 @@ vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
 	pfn = page_to_pfn(page);
 
 	/* Prefault the entire VMA range right away to avoid further faults */
-	for (address = vma->vm_start; address < vma->vm_end; address += PAGE_SIZE) {
-
-		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed_prot(vma, address,
-						    __pfn_to_pfn_t(pfn, PFN_DEV),
-						    prot);
-		else
-			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
-	}
+	for (address = vma->vm_start; address < vma->vm_end;
+	     address += PAGE_SIZE)
+		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 	return ret;
 }
@@ -521,14 +510,14 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 		 << PAGE_SHIFT);
 	int ret;
 
-	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
 		return -EIO;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
 	if (ret)
 		return ret;
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_SYSTEM:
 		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
 			ret = ttm_tt_swapin(bo->ttm);
@@ -560,8 +549,14 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.access = ttm_bo_vm_access,
 };
 
-static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 {
+	/* Enforce no COW, since it would have really strange behavior. */
+	if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
+
+	ttm_bo_get(bo);
+
 	/*
 	 * Drivers may want to override the vm_ops field. Otherwise we
 	 * use TTM's default callbacks.
@@ -576,21 +571,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s
 
 	vma->vm_private_data = bo;
 
-	/*
-	 * We'd like to use VM_PFNMAP on shared mappings, where
-	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
-	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
-	 * bad for performance. Until that has been sorted out, use
-	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
-	 */
-	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_flags |= VM_PFNMAP;
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-}
-
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
-{
-	ttm_bo_get(bo);
-	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_mmap_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 56b0efdba1a97..997c458f68a9a 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -31,12 +31,47 @@
  */
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/pgtable.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
 #include <drm/drm_sysfs.h>
+#include <drm/ttm/ttm_caching.h>
 
 #include "ttm_module.h"
 
+/**
+ * ttm_prot_from_caching - Modify the page protection according to the
+ * ttm caching mode
+ * @caching: The ttm caching mode
+ * @tmp: The original page protection
+ *
+ * Return: The modified page protection
+ */
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
+{
+	/* Cached mappings need no adjustment */
+	if (caching == ttm_cached)
+		return tmp;
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (caching == ttm_write_combined)
+		tmp = pgprot_writecombine(tmp);
+	else if (boot_cpu_data.x86 > 3)
+		tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+	defined(__powerpc__) || defined(__mips__)
+	if (caching == ttm_write_combined)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
 struct dentry *ttm_debugfs_root;
 
 static int __init ttm_init(void)
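
[Editor's note] ttm_prot_from_caching() is the building block the kmap iterators rely on: given a ttm_caching mode it returns a pgprot suitable for kernel mappings. A minimal, hypothetical use, mirroring what the tt iterator in ttm_tt.c further down does (the declaration's header placement is assumed):

#include <linux/highmem.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_tt.h>
/* ttm_prot_from_caching() is declared in TTM's internal ttm_module.h */

/* Hypothetical: map one tt page with a protection matching its caching. */
static void *example_map_tt_page(struct ttm_tt *tt, pgoff_t i)
{
    pgprot_t prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);

    return kmap_local_page_prot(tt->pages[i], prot);
}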
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index b9d5da6e6a810..03395386e8a7c 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,12 +29,13 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_bo_api.h>
 #include <drm/drm_mm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/module.h>
 
 /*
  * Currently we use a spinlock for the lock, but a mutex *may* be
@@ -57,11 +58,11 @@ to_range_manager(struct ttm_resource_manager *man)
 static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *bo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
+	struct ttm_range_mgr_node *node;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
 	enum drm_mm_insert_mode mode;
 	unsigned long lpfn;
 	int ret;
@@ -70,7 +71,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	if (!lpfn)
 		lpfn = man->size;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
@@ -78,35 +79,36 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  node->base.num_pages,
 					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
 		kfree(node);
-	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		return ret;
 	}
 
-	return ret;
+	node->base.start = node->mm_nodes[0].start;
+	*res = &node->base;
+	return 0;
 }
 
 static void ttm_range_man_free(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct ttm_range_manager *rman = to_range_manager(man);
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 static void ttm_range_man_debug(struct ttm_resource_manager *man,
@@ -125,6 +127,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
 	.debug = ttm_range_man_debug
 };
 
+/**
+ * ttm_range_man_init - Initialise a generic range manager
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * Initialise a generic range manager for the selected memory type.
+ * The range manager is installed for this device in the type slot.
+ */
 int ttm_range_man_init(struct ttm_device *bdev,
 		       unsigned type, bool use_tt,
 		       unsigned long p_size)
@@ -152,6 +165,14 @@ int ttm_range_man_init(struct ttm_device *bdev,
 }
 EXPORT_SYMBOL(ttm_range_man_init);
 
+/**
+ * ttm_range_man_fini - Remove the generic range manager
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Remove the generic range manager from a slot and tear it down.
+ */
 int ttm_range_man_fini(struct ttm_device *bdev,
 		       unsigned type)
 {
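
[Editor's note] The range manager now subclasses struct ttm_resource instead of hanging a drm_mm_node off mem->mm_node, and the flexible-array-plus-container_of pattern generalises to any driver manager. A sketch of the shape, with hypothetical names (struct ttm_range_mgr_node and to_ttm_range_mgr_node() are the real ones in this series):

#include <linux/kernel.h>
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical subclassed resource, mirroring struct ttm_range_mgr_node. */
struct example_mgr_node {
    struct ttm_resource base;      /* must be the first member */
    struct drm_mm_node mm_nodes[]; /* sized via struct_size() at alloc */
};

static inline struct example_mgr_node *
to_example_node(struct ttm_resource *res)
{
    return container_of(res, struct example_mgr_node, base);
}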
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 59e2b7157e415..2431717376e70 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,17 +22,17 @@
  * Authors: Christian König
  */
 
+#include <linux/dma-buf-map.h>
+#include <linux/io-mapping.h>
+#include <linux/scatterlist.h>
+
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-int ttm_resource_alloc(struct ttm_buffer_object *bo,
-		       const struct ttm_place *place,
-		       struct ttm_resource *res)
+void ttm_resource_init(struct ttm_buffer_object *bo,
+                       const struct ttm_place *place,
+                       struct ttm_resource *res)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, place->mem_type);
-
-	res->mm_node = NULL;
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
@@ -41,18 +41,29 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 	res->bus.offset = 0;
 	res->bus.is_iomem = false;
 	res->bus.caching = ttm_cached;
-
-	return man->func->alloc(man, bo, place, res);
 }
+EXPORT_SYMBOL(ttm_resource_init);
 
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+		       const struct ttm_place *place,
+		       struct ttm_resource **res_ptr)
 {
 	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, res->mem_type);
+		ttm_manager_type(bo->bdev, place->mem_type);
 
-	man->func->free(man, res);
-	res->mm_node = NULL;
-	res->mem_type = TTM_PL_SYSTEM;
+	return man->func->alloc(man, bo, place, res_ptr);
+}
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
+{
+	struct ttm_resource_manager *man;
+
+	if (!*res)
+		return;
+
+	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+	man->func->free(man, *res);
+	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
 
@@ -147,3 +158,192 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 		man->func->debug(man, p);
 }
 EXPORT_SYMBOL(ttm_resource_manager_debug);
+
+static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
+					  struct dma_buf_map *dmap,
+					  pgoff_t i)
+{
+	struct ttm_kmap_iter_iomap *iter_io =
+		container_of(iter, typeof(*iter_io), base);
+	void __iomem *addr;
+
+retry:
+	while (i >= iter_io->cache.end) {
+		iter_io->cache.sg = iter_io->cache.sg ?
+			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
+		iter_io->cache.i = iter_io->cache.end;
+		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
+			PAGE_SHIFT;
+		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
+			iter_io->start;
+	}
+
+	if (i < iter_io->cache.i) {
+		iter_io->cache.end = 0;
+		iter_io->cache.sg = NULL;
+		goto retry;
+	}
+
+	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
+				       (((resource_size_t)i - iter_io->cache.i)
+					<< PAGE_SHIFT));
+	dma_buf_map_set_vaddr_iomem(dmap, addr);
+}
+
+static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
+					    struct dma_buf_map *map)
+{
+	io_mapping_unmap_local(map->vaddr_iomem);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
+	.map_local =  ttm_kmap_iter_iomap_map_local,
+	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
+	.maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
+ * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
+ * @iomap: The struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct
+ * ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+			 struct io_mapping *iomap,
+			 struct sg_table *st,
+			 resource_size_t start)
+{
+	iter_io->base.ops = &ttm_kmap_iter_io_ops;
+	iter_io->iomap = iomap;
+	iter_io->st = st;
+	iter_io->start = start;
+	memset(&iter_io->cache, 0, sizeof(iter_io->cache));
+
+	return &iter_io->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
+
+/**
+ * DOC: Linear io iterator
+ *
+ * This code should die in the not too near future. Best would be if we could
+ * make io-mapping use memremap for all io memory, and have memremap
+ * implement a kmap_local functionality. We could then strip a huge amount of
+ * code. These linear io iterators are implemented to mimic old functionality,
+ * and they don't use kmap_local semantics at all internally. Rather ioremap or
+ * friends, and at least on 32-bit they add global TLB flushes and points
+ * of failure.
+ */
+
+static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
+					      struct dma_buf_map *dmap,
+					      pgoff_t i)
+{
+	struct ttm_kmap_iter_linear_io *iter_io =
+		container_of(iter, typeof(*iter_io), base);
+
+	*dmap = iter_io->dmap;
+	dma_buf_map_incr(dmap, i * PAGE_SIZE);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
+	.map_local =  ttm_kmap_iter_linear_io_map_local,
+	.maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
+ * @iter_io: The iterator to initialize
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It sets up a memcpy kmap iterator
+ * pointing at a linear chunk of io memory.
+ *
+ * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
+ * failure.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+			     struct ttm_device *bdev,
+			     struct ttm_resource *mem)
+{
+	int ret;
+
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret)
+		goto out_err;
+	if (!mem->bus.is_iomem) {
+		ret = -EINVAL;
+		goto out_io_free;
+	}
+
+	if (mem->bus.addr) {
+		dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+		iter_io->needs_unmap = false;
+	} else {
+		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
+		iter_io->needs_unmap = true;
+		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
+		if (mem->bus.caching == ttm_write_combined)
+			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+						    ioremap_wc(mem->bus.offset,
+							       bus_size));
+		else if (mem->bus.caching == ttm_cached)
+			dma_buf_map_set_vaddr(&iter_io->dmap,
+					      memremap(mem->bus.offset, bus_size,
+						       MEMREMAP_WB |
+						       MEMREMAP_WT |
+						       MEMREMAP_WC));
+
+		/* If uncached was requested, or if mapping cached or wc failed */
+		if (dma_buf_map_is_null(&iter_io->dmap))
+			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+						    ioremap(mem->bus.offset,
+							    bus_size));
+
+		if (dma_buf_map_is_null(&iter_io->dmap)) {
+			ret = -ENOMEM;
+			goto out_io_free;
+		}
+	}
+
+	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
+	return &iter_io->base;
+
+out_io_free:
+	ttm_mem_io_free(bdev, mem);
+out_err:
+	return ERR_PTR(ret);
+}
+
+/**
+ * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
+ * @iter_io: The iterator to clean up
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It cleans up a memcpy kmap
+ * iterator initialized by ttm_kmap_iter_linear_io_init.
+ */
+void
+ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+			     struct ttm_device *bdev,
+			     struct ttm_resource *mem)
+{
+	if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
+		if (iter_io->dmap.is_iomem)
+			iounmap(iter_io->dmap.vaddr_iomem);
+		else
+			memunmap(iter_io->dmap.vaddr);
+	}
+
+	ttm_mem_io_free(bdev, mem);
+}
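
[Editor's note] Putting the two iterator flavours together: the selection logic in ttm_bo_move_memcpy() boils down to trying the linear io iterator first and treating -EINVAL ("not io memory") as the cue to fall back to the tt iterator. A hedged sketch with hypothetical naming:

#include <linux/err.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical iterator selection, mirroring the memcpy move path. */
static struct ttm_kmap_iter *
example_pick_iter(struct ttm_kmap_iter_linear_io *io,
                  struct ttm_kmap_iter_tt *tt,
                  struct ttm_device *bdev,
                  struct ttm_buffer_object *bo,
                  struct ttm_resource *mem, bool use_tt)
{
    struct ttm_kmap_iter *iter;

    iter = ttm_kmap_iter_linear_io_init(io, bdev, mem);
    /* -EINVAL means "not io memory": fall back to the tt-backed iterator */
    if (PTR_ERR(iter) == -EINVAL && use_tt)
        iter = ttm_kmap_iter_tt_init(tt, bo->ttm);

    return iter; /* may still be an ERR_PTR the caller must check */
}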
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 474221e863d06..63aca52f75e12 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -3,20 +3,27 @@
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_placement.h>
+#include <linux/slab.h>
 
 #include "ttm_module.h"
 
 static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 			     struct ttm_buffer_object *bo,
 			     const struct ttm_place *place,
-			     struct ttm_resource *mem)
+			     struct ttm_resource **res)
 {
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, *res);
 	return 0;
 }
 
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func ttm_sys_manager_func = {
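
[Editor's note] The system-domain manager above is the smallest example of the series' new allocation contract: the manager allocates the struct ttm_resource itself and returns it through a pointer, and ttm_resource_free() both releases it and clears the caller's pointer. A hypothetical caller:

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical: allocate and immediately release a system-domain resource. */
static int example_sys_alloc(struct ttm_buffer_object *bo)
{
    static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
    struct ttm_resource *res;
    int ret;

    ret = ttm_resource_alloc(bo, &sys_mem, &res); /* manager kzallocs res */
    if (ret)
        return ret;

    ttm_resource_free(bo, &res); /* frees res and sets the pointer to NULL */
    return 0;
}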
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 539e0232cb3b4..24031a8acd2dd 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -415,7 +415,7 @@ DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
 #endif
 
 
-/**
+/*
  * ttm_tt_mgr_init - register with the MM shrinker
  *
  * Register with the MM shrinker for swapping out BOs.
@@ -433,3 +433,48 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
 	if (!ttm_dma32_pages_limit)
 		ttm_dma32_pages_limit = num_dma32_pages;
 }
+
+static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
+				       struct dma_buf_map *dmap,
+				       pgoff_t i)
+{
+	struct ttm_kmap_iter_tt *iter_tt =
+		container_of(iter, typeof(*iter_tt), base);
+
+	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+							 iter_tt->prot));
+}
+
+static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
+					 struct dma_buf_map *map)
+{
+	kunmap_local(map->vaddr);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
+	.map_local = ttm_kmap_iter_tt_map_local,
+	.unmap_local = ttm_kmap_iter_tt_unmap_local,
+	.maps_tt = true,
+};
+
+/**
+ * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
+ * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
+ * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+		      struct ttm_tt *tt)
+{
+	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
+	iter_tt->tt = tt;
+	if (tt)
+		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
+	else
+		iter_tt->prot = PAGE_KERNEL;
+
+	return &iter_tt->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
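
[Editor's note] A tt iterator can also be used stand-alone; map_local here reduces to kmap_local_page_prot() with the tt's caching-derived protection. A minimal, hypothetical peek at a populated tt:

#include <linux/dma-buf-map.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical: read-access page 0 of a populated ttm_tt. */
static void example_peek_tt(struct ttm_tt *tt)
{
    struct ttm_kmap_iter_tt tt_iter;
    struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&tt_iter, tt);
    struct dma_buf_map map;

    iter->ops->map_local(iter, &map, 0);
    /* ... map.vaddr now points at page 0 ... */
    iter->ops->unmap_local(iter, &map);
}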
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 361d3193258ea..8c041d7ce4f1b 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -9,7 +9,8 @@
 #include "hgsmi_ch_setup.h"
 
 /**
- * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * hgsmi_report_flags_location - Inform the host of the location of
+ *                               the host flags in VRAM via an HGSMI cmd.
  * Return: 0 or negative errno value.
  * @ctx:        The context of the guest heap to use.
  * @location:   The offset chosen for the flags within guest VRAM.
@@ -33,7 +34,8 @@ int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
 }
 
 /**
- * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * hgsmi_send_caps_info - Notify the host of HGSMI-related guest capabilities
+ *                        via an HGSMI command.
  * Return: 0 or negative errno value.
  * @ctx:        The context of the guest heap to use.
  * @caps:       The capabilities to report, see vbva_caps.
@@ -71,7 +73,8 @@ int hgsmi_test_query_conf(struct gen_pool *ctx)
 }
 
 /**
- * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * hgsmi_query_conf - Query the host for an HGSMI configuration
+ *                    parameter via an HGSMI command.
  * Return: 0 or negative errno value.
  * @ctx:        The context containing the heap used.
  * @index:      The index of the parameter to query.
@@ -99,7 +102,8 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
 }
 
 /**
- * Pass the host a new mouse pointer shape via an HGSMI command.
+ * hgsmi_update_pointer_shape - Pass the host a new mouse pointer shape
+ *                              via an HGSMI command.
  * Return: 0 or negative errno value.
  * @ctx:        The context containing the heap to be used.
  * @flags:      Cursor flags.
@@ -171,9 +175,10 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
 }
 
 /**
- * Report the guest cursor position.  The host may wish to use this information
- * to re-position its own cursor (though this is currently unlikely).  The
- * current host cursor position is returned.
+ * hgsmi_cursor_position - Report the guest cursor position.  The host may
+ *                         wish to use this information to re-position its
+ *                         own cursor (though this is currently unlikely).
+ *                         The current host cursor position is returned.
  * Return: 0 or negative errno value.
  * @ctx:              The context containing the heap used.
  * @report_position:  Are we reporting a position?
diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
index 7580b90023792..10b32d986b956 100644
--- a/drivers/gpu/drm/vboxvideo/modesetting.c
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
@@ -8,9 +8,11 @@
 #include "hgsmi_channels.h"
 
 /**
- * Set a video mode via an HGSMI request.  The views must have been
- * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
- * set on the first display then it must be set first using registers.
+ * hgsmi_process_display_info - Set a video mode via an HGSMI request.
+ *                              The views must have been initialised first
+ *                              using @a VBoxHGSMISendViewInfo and if the mode
+ *                              is being set on the first display then it must
+ *                              be set first using registers.
  * @ctx:           The context containing the heap to use.
  * @display:       The screen number.
  * @origin_x:      The horizontal displacement relative to the first scrn.
@@ -51,10 +53,12 @@ void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
 }
 
 /**
- * Report the rectangle relative to which absolute pointer events should be
- * expressed.  This information remains valid until the next VBVA resize event
- * for any screen, at which time it is reset to the bounding rectangle of all
- * virtual screens.
+ * hgsmi_update_input_mapping - Report the rectangle relative to which absolute
+ *                              pointer events should be expressed.  This
+ *                              information remains valid until the next VBVA
+ *                              resize event for any screen, at which time it is
+ *                              reset to the bounding rectangle of all virtual
+ *                              screens.
  * Return: 0 or negative errno value.
  * @ctx:       The context containing the heap to use.
  * @origin_x:  Upper left X co-ordinate relative to the first screen.
@@ -84,7 +88,7 @@ int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
 }
 
 /**
- * Get most recent video mode hints.
+ * hgsmi_get_mode_hints - Get most recent video mode hints.
  * Return: 0 or negative errno value.
  * @ctx:      The context containing the heap to use.
  * @screens:  The number of screens to query hints for, starting at 0.
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index f9de8632a28b5..3c4cc133e3dfa 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -166,10 +166,9 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
 	bool connected = false;
 
-	if (vc4_hdmi->hpd_gpio) {
-		if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
-		    vc4_hdmi->hpd_active_low)
-			connected = true;
+	if (vc4_hdmi->hpd_gpio &&
+	    gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+		connected = true;
 	} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
 		connected = true;
 	} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
@@ -2105,7 +2104,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	struct vc4_hdmi *vc4_hdmi;
 	struct drm_encoder *encoder;
 	struct device_node *ddc_node;
-	u32 value;
 	int ret;
 
 	vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
@@ -2144,18 +2142,10 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	/* Only use the GPIO HPD pin if present in the DT, otherwise
 	 * we'll use the HDMI core's register.
 	 */
-	if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
-		enum of_gpio_flags hpd_gpio_flags;
-
-		vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
-							     "hpd-gpios", 0,
-							     &hpd_gpio_flags);
-		if (vc4_hdmi->hpd_gpio < 0) {
-			ret = vc4_hdmi->hpd_gpio;
-			goto err_unprepare_hsm;
-		}
-
-		vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+	vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+	if (IS_ERR(vc4_hdmi->hpd_gpio)) {
+		ret = PTR_ERR(vc4_hdmi->hpd_gpio);
+		goto err_put_ddc;
 	}
 
 	vc4_hdmi->disable_wifi_frequencies =
@@ -2209,8 +2199,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
 err_destroy_encoder:
 	drm_encoder_cleanup(encoder);
-err_unprepare_hsm:
 	pm_runtime_disable(dev);
+err_put_ddc:
 	put_device(&vc4_hdmi->ddc->dev);
 
 	return ret;
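
[Editor's note] The vc4 conversion works because the gpiod API folds devicetree polarity into the accessor: gpiod_get_value_cansleep() already honours GPIO_ACTIVE_LOW from the "hpd-gpios" property, so the hand-rolled hpd_active_low XOR can go. The lookup pattern, sketched with a hypothetical helper name:

#include <linux/gpio/consumer.h>

static int example_request_hpd(struct device *dev, struct gpio_desc **out)
{
    /* Looks up "hpd-gpios" in DT; returns NULL if the property is absent. */
    struct gpio_desc *hpd = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);

    if (IS_ERR(hpd))
        return PTR_ERR(hpd);

    *out = hpd; /* gpiod_get_value_cansleep() handles active-low itself */
    return 0;
}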
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 39bd2c413ec04..884d245507a9a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -148,8 +148,7 @@ struct vc4_hdmi {
 	/* VC5 Only */
 	void __iomem *rm_regs;
 
-	int hpd_gpio;
-	bool hpd_active_low;
+	struct gpio_desc *hpd_gpio;
 
 	/*
 	 * On some systems (like the RPi4), some modes are in the same
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a0e75f1d5d016..bf38a7e319d14 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -25,7 +25,7 @@
  *	Ben Widawsky <ben@bwidawsk.net>
  */
 
-/**
+/*
  * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
  * software renderer and the X server for efficient buffer sharing.
  */
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64faf..bd6f75285fd95 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!dma_resv_test_signaled_rcu(resv,
-						  arg->flags & VGEM_FENCE_WRITE)) {
+	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
 	}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee395154..5c1ad1596889b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
-		ret = dma_resv_test_signaled_rcu(obj->resv, true);
+		ret = dma_resv_test_signaled(obj->resv, true);
 	} else {
-		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
-						timeout);
+		ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
 	}
 	if (ret == 0)
 		ret = -EBUSY;
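
[Editor's note] These hunks are part of a tree-wide rename: the _rcu suffixes are dropped from the dma_resv helpers with no behavioural change. A self-contained sketch of the renamed calls (example_wait_idle() is hypothetical):

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Hypothetical: idle-check, then a blocking wait, with the renamed helpers. */
static long example_wait_idle(struct dma_resv *resv, bool wait_all)
{
    if (dma_resv_test_signaled(resv, wait_all))
        return 0; /* already idle */

    /* interruptible wait, effectively without a timeout */
    return dma_resv_wait_timeout(resv, wait_all, true,
                                 MAX_SCHEDULE_TIMEOUT);
}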
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index cdbd5a8707116..09fe20e918f93 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,10 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	d.src_addr = NULL;
 	d.dst_pages = dst->ttm->pages;
 	d.src_pages = src->ttm->pages;
-	d.dst_num_pages = dst->mem.num_pages;
-	d.src_num_pages = src->mem.num_pages;
-	d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
-	d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
+	d.dst_num_pages = dst->resource->num_pages;
+	d.src_num_pages = src->resource->num_pages;
+	d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+	d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
 	d.diff = diff;
 
 	for (j = 0; j < h; ++j) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 04dd49c4c2572..362f56d5b12ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -103,7 +103,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
 		goto err;
 
 	if (buf->base.pin_count > 0)
-		ret = ttm_bo_mem_compat(placement, &bo->mem,
+		ret = ttm_bo_mem_compat(placement, bo->resource,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
 		ret = ttm_bo_validate(bo, placement, &ctx);
@@ -145,7 +145,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 		goto err;
 
 	if (buf->base.pin_count > 0) {
-		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
 					&new_flags) == true ? 0 : -EINVAL;
 		goto out_unreserve;
 	}
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	uint32_t new_flags;
 
 	place = vmw_vram_placement.placement[0];
-	place.lpfn = bo->mem.num_pages;
+	place.lpfn = bo->resource->num_pages;
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -227,22 +227,22 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	 * In that case, evict it first because TTM isn't good at handling
 	 * that situation.
 	 */
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    bo->mem.start < bo->mem.num_pages &&
-	    bo->mem.start > 0 &&
+	if (bo->resource->mem_type == TTM_PL_VRAM &&
+	    bo->resource->start < bo->resource->num_pages &&
+	    bo->resource->start > 0 &&
 	    buf->base.pin_count == 0) {
 		ctx.interruptible = false;
 		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 	}
 
 	if (buf->base.pin_count > 0)
-		ret = ttm_bo_mem_compat(&placement, &bo->mem,
+		ret = ttm_bo_mem_compat(&placement, bo->resource,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
 		ret = ttm_bo_validate(bo, &placement, &ctx);
 
 	/* For some reason we didn't end up at the start of vram */
-	WARN_ON(ret == 0 && bo->mem.start != 0);
+	WARN_ON(ret == 0 && bo->resource->start != 0);
 	if (!ret)
 		vmw_bo_pin_reserved(buf, true);
 
@@ -293,11 +293,11 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 			  SVGAGuestPtr *ptr)
 {
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
+	if (bo->resource->mem_type == TTM_PL_VRAM) {
 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
-		ptr->offset = bo->mem.start << PAGE_SHIFT;
+		ptr->offset = bo->resource->start << PAGE_SHIFT;
 	} else {
-		ptr->gmrId = bo->mem.start;
+		ptr->gmrId = bo->resource->start;
 		ptr->offset = 0;
 	}
 }
@@ -316,7 +316,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 	struct ttm_place pl;
 	struct ttm_placement placement;
 	struct ttm_buffer_object *bo = &vbo->base;
-	uint32_t old_mem_type = bo->mem.mem_type;
+	uint32_t old_mem_type = bo->resource->mem_type;
 	int ret;
 
 	dma_resv_assert_held(bo->base.resv);
@@ -326,8 +326,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 
 	pl.fpfn = 0;
 	pl.lpfn = 0;
-	pl.mem_type = bo->mem.mem_type;
-	pl.flags = bo->mem.placement;
+	pl.mem_type = bo->resource->mem_type;
+	pl.flags = bo->resource->placement;
 
 	memset(&placement, 0, sizeof(placement));
 	placement.num_placement = 1;
@@ -335,7 +335,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 
 	ret = ttm_bo_validate(bo, &placement, &ctx);
 
-	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
 
 	if (pin)
 		ttm_bo_pin(bo);
@@ -369,7 +369,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 	if (virtual)
 		return virtual;
 
-	ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
+	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
 	if (ret)
 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 	if (flags & drm_vmw_synccpu_allow_cs) {
 		long lret;
 
-		lret = dma_resv_wait_timeout_rcu
-			(bo->base.resv, true, true,
-			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+					     nonblock ? 0 :
+					     MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)
@@ -1197,7 +1197,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	 * With other types of moves, the underlying pages stay the same,
 	 * and the map can be kept.
 	 */
-	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
 		vmw_bo_unmap(vbo);
 
 	/*
@@ -1205,6 +1205,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	 * read back all resource content first, and unbind the MOB from
 	 * the resource.
 	 */
-	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
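
[Editor's note] All of the vmwgfx churn in this file follows one mechanical rule from the TTM rework: the embedded bo->mem is gone and drivers dereference bo->resource instead. For instance (hypothetical helper):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical: the bo->mem.X -> bo->resource->X accessor change. */
static bool example_bo_is_vram(struct ttm_buffer_object *bo)
{
    return bo->resource->mem_type == TTM_PL_VRAM; /* was bo->mem.mem_type */
}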
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 9c89189a226d5..956b85e35cefb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -576,11 +576,11 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
 	cmd->body.cid = cid;
 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
 
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
+	if (bo->resource->mem_type == TTM_PL_VRAM) {
 		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
-		cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT;
+		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
 	} else {
-		cmd->body.guestResult.gmrId = bo->mem.start;
+		cmd->body.guestResult.gmrId = bo->resource->start;
 		cmd->body.guestResult.offset = 0;
 	}
 
@@ -621,8 +621,8 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = cid;
 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
-	cmd->body.mobid = bo->mem.start;
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->resource->start;
 	cmd->body.offset = 0;
 
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 05ca310ed61ab..6bb4961e64a57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -889,7 +889,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	header->cmd = man->map + offset;
 	if (man->using_mob) {
 		cb_hdr->flags = SVGA_CB_FLAG_MOB;
-		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
 		cb_hdr->ptr.mob.mobOffset = offset;
 	} else {
 		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3ed9914cb994b..dffe3804ad3e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -346,7 +346,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 	} *cmd;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
@@ -355,7 +355,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	cmd->body.mobid = bo->mem.start;
+	cmd->body.mobid = bo->resource->start;
 	cmd->body.validContents = res->backup_dirty;
 	res->backup_dirty = false;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -385,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	uint8_t *cmd;
 
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_binding_state_scrub(uctx->cbs);
@@ -513,7 +513,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
 	} *cmd;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
@@ -522,7 +522,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
 	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
-	cmd->body.mobid = bo->mem.start;
+	cmd->body.mobid = bo->resource->start;
 	cmd->body.validContents = res->backup_dirty;
 	res->backup_dirty = false;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -594,7 +594,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 	uint8_t *cmd;
 
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_dx_context_scrub_cotables(res, readback);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b40aa002bf2bb..c84a16c1def0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -173,7 +173,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 		SVGA3dCmdDXSetCOTable body;
 	} *cmd;
 
-	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
 	dma_resv_assert_held(bo->base.resv);
 
 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
@@ -181,12 +181,12 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 		return -ENOMEM;
 
 	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
-	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
 	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = vcotbl->ctx->id;
 	cmd->body.type = vcotbl->type;
-	cmd->body.mobid = bo->mem.start;
+	cmd->body.mobid = bo->resource->start;
 	cmd->body.validSizeInBytes = vcotbl->size_read_back;
 
 	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
@@ -315,7 +315,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	if (!vmw_resource_mob_attached(res))
 		return 0;
 
-	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
 	dma_resv_assert_held(bo->base.resv);
 
 	mutex_lock(&dev_priv->binding_mutex);
@@ -431,7 +431,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 	 * This should really be a TTM utility.
 	 */
-	for (i = 0; i < old_bo->mem.num_pages; ++i) {
+	for (i = 0; i < old_bo->resource->num_pages; ++i) {
 		bool dummy;
 
 		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 32a84dff3fbfe..a2b8464b3f566 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -735,7 +735,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = ctx_res->id;
-	cmd->body.mobid = dx_query_mob->base.mem.start;
+	cmd->body.mobid = dx_query_mob->base.resource->start;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1046,7 +1046,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
+		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
 			VMW_DEBUG_USER("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -3710,16 +3710,16 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 
 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
 		bo = &reloc->vbo->base;
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
-			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
+			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 			break;
 		case VMW_PL_GMR:
-			reloc->location->gmrId = bo->mem.start;
+			reloc->location->gmrId = bo->resource->start;
 			break;
 		case VMW_PL_MOB:
-			*reloc->mob_loc = bo->mem.start;
+			*reloc->mob_loc = bo->resource->start;
 			break;
 		default:
 			BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960d1b89d..28ceb749a7339 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,11 +52,17 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	*res = kmalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, *res);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ -64,36 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += mem->num_pages;
+		gman->used_gmr_pages += (*res)->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
-	mem->mm_node = gman;
-	mem->start = id;
+	(*res)->start = id;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	kfree(*res);
 	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, res->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= res->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 45c9c6a7f1d69..e5a9a5cbd01a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
-	pgoff_t num_pages = vbo->base.mem.num_pages;
+	pgoff_t num_pages = vbo->base.resource->num_pages;
 	size_t size, acc_size;
 	int ret;
 	static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
 		return ret;
 
 	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-	if (unlikely(page_offset >= bo->mem.num_pages)) {
+	if (unlikely(page_offset >= bo->resource->num_pages)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->mem.num_pages ||
+		if (page_offset >= bo->resource->num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
@@ -529,7 +529,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->mem.num_pages ||
+		if (page_offset >= bo->resource->num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 62ea920addc30..7b45393ad98e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 		if (bo->moving)
 			dma_fence_put(bo->moving);
 		bo->moving = dma_fence_get
-			(dma_resv_get_excl(bo->base.resv));
+			(dma_resv_excl_fence(bo->base.resv));
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b391975871a59..b3c8d2da6f1a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -254,7 +254,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	} *cmd;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
@@ -263,7 +263,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
-	cmd->body.mobid = bo->mem.start;
+	cmd->body.mobid = bo->resource->start;
 	cmd->body.offsetInBytes = res->backup_offset;
 	res->backup_dirty = false;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -282,7 +282,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 	} *cmd;
 	struct vmw_fence_obj *fence;
 
-	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+	BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
 
 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL))
@@ -402,7 +402,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = shader->ctx->id;
 	cmd->body.shid = shader->id;
-	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.mobid = res->backup->base.resource->start;
 	cmd->body.offsetInBytes = res->backup_offset;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 
@@ -450,7 +450,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 	mutex_lock(&dev_priv->binding_mutex);
 	vmw_dx_shader_unscrub(res);
 	mutex_unlock(&dev_priv->binding_mutex);
@@ -513,7 +513,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+	BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	ret = vmw_dx_shader_scrub(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 1dd042a20a66c..c8efa4a6c995c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -106,7 +106,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
 	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.soid = so->id;
-	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.mobid = res->backup->base.resource->start;
 	cmd->body.offsetInBytes = res->backup_offset;
 	cmd->body.sizeInBytes = so->size;
 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -142,7 +142,7 @@ static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
 	struct ttm_buffer_object *bo = val_buf->bo;
 	int ret;
 
-	if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+	if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->binding_mutex);
@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+	if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 8ead065748508..0835468bb2eed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1212,7 +1212,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 	uint32_t submit_size;
 	struct ttm_buffer_object *bo = val_buf->bo;
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
@@ -1223,7 +1223,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
 	cmd1->header.size = sizeof(cmd1->body);
 	cmd1->body.sid = res->id;
-	cmd1->body.mobid = bo->mem.start;
+	cmd1->body.mobid = bo->resource->start;
 	if (res->backup_dirty) {
 		cmd2 = (void *) &cmd1[1];
 		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
@@ -1266,7 +1266,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 	uint8_t *cmd;
 
 
-	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
 
 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
 	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 5ccc35b3194c9..2a3d3468e4e0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -50,20 +51,22 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
 			    struct ttm_buffer_object *bo,
 			    const struct ttm_place *place,
-			    struct ttm_resource *mem)
+			    struct ttm_resource **res)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
@@ -75,23 +78,26 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+		if (node->base.num_pages >= align_pages) {
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place,
+						     &node->base, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+	if (node->base.num_pages >= align_pages) {
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, &node->base,
+					     lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  node->base.num_pages,
 					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
@@ -100,28 +106,24 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		node->base.start = node->mm_nodes[0].start;
+		*res = &node->base;
 	}
 
 	return ret;
 }
 
-
-
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
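
The vmw_thp_get_node()/vmw_thp_put_node() changes above show the new allocation contract for resource managers: allocate a subclass of struct ttm_resource (here struct ttm_range_mgr_node with one trailing drm_mm_node), initialize it with ttm_resource_init(), and hand it back through the double pointer. A hedged, minimal alloc() callback following the same pattern; my_alloc and my_mm are illustrative, and the locking around the drm_mm is elided:

	#include <linux/slab.h>
	#include <drm/ttm/ttm_range_manager.h>

	static int my_alloc(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
	{
		struct ttm_range_mgr_node *node;
		int ret;

		/* One trailing drm_mm_node, hence struct_size(). */
		node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		/* Fills in num_pages, mem_type and placement from bo/place. */
		ttm_resource_init(bo, place, &node->base);

		ret = drm_mm_insert_node(&my_mm, &node->mm_nodes[0],
					 node->base.num_pages);
		if (ret) {
			kfree(node);
			return ret;
		}

		node->base.start = node->mm_nodes[0].start;
		*res = &node->base;
		return 0;
	}
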
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 35b03fe21161e..0488042fb287b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -719,7 +719,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 		    struct ttm_resource *new_mem,
 		    struct ttm_place *hop)
 {
-	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
 	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
 	int ret;
 
@@ -729,11 +729,11 @@ static int vmw_move(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	vmw_move_notify(bo, &bo->mem, new_mem);
+	vmw_move_notify(bo, bo->resource, new_mem);
 
 	if (old_man->use_tt && new_man->use_tt) {
-		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-			ttm_bo_assign_mem(bo, new_mem);
+		if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+			ttm_bo_move_null(bo, new_mem);
 			return 0;
 		}
 		ret = ttm_bo_wait_ctx(bo, ctx);
@@ -741,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 			goto fail;
 
 		vmw_ttm_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, &bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		return 0;
 	} else {
@@ -751,7 +751,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 	}
 	return 0;
 fail:
-	vmw_move_notify(bo, new_mem, &bo->mem);
+	vmw_move_notify(bo, new_mem, bo->resource);
 	return ret;
 }
 
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 0d65ce146fc4d..c6e875619fac7 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -66,7 +66,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	 * may be not up-to-date. Wait for the exporter to finish
 	 * the migration.
 	 */
-	fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
+	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
 	if (fence)
 		return dma_fence_wait(fence, false);
 
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index b06fa6e42e6e0..4ad0331a8c57d 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -46,7 +46,7 @@
  * The Screen position of the top left corner of the displayed
  * cursor is specificed by CURS_HORZ_VERT_POSN. Care must be taken
  * when the cursor hot spot is not the top left corner and the
- * physical cursor position becomes negative. It will be be displayed
+ * physical cursor position becomes negative. It will be displayed
  * if either the horizontal or vertical cursor position is negative
  *
  * If x becomes negative the cursor manager must adjust the CURS_HORZ_OFFSET
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 52755b591c148..63721337a3778 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -542,8 +542,8 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
 /*
  * show some display controller and cursor registers
  */
-static ssize_t mb862xxfb_show_dispregs(struct device *dev,
-				       struct device_attribute *attr, char *buf)
+static ssize_t dispregs_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
 {
 	struct fb_info *fbi = dev_get_drvdata(dev);
 	struct mb862xxfb_par *par = fbi->par;
@@ -577,7 +577,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
 	return ptr - buf;
 }
 
-static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
+static DEVICE_ATTR_RO(dispregs);
 
 static irqreturn_t mb862xx_intr(int irq, void *dev_id)
 {
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d9156..cc9de1632dd32 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
 
 #include <linux/scatterlist.h>
 
+struct dma_buf_map;
+
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 void drm_clflush_sg(struct sg_table *st);
 void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
 #endif
 }
 
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len);
 #endif
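
drm_memcpy_from_wc() takes struct dma_buf_map descriptors instead of raw pointers, so the same call covers both I/O and system memory; drm_memcpy_init_early() is expected to run once at core init to detect CPU support. A hedged usage sketch — wc_vaddr, sys_buf and len are illustrative:

	#include <linux/dma-buf-map.h>
	#include <drm/drm_cache.h>

	struct dma_buf_map src, dst;

	dma_buf_map_set_vaddr_iomem(&src, wc_vaddr);	/* __iomem WC mapping */
	dma_buf_map_set_vaddr(&dst, sys_buf);		/* plain kernel memory */

	/* Uses non-temporal loads where the CPU supports them (e.g.
	 * movntdqa on x86), otherwise degrades to an ordinary copy.
	 */
	drm_memcpy_from_wc(&dst, &src, len);
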
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 10225a0a35d0a..d18af49fd0090 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -275,7 +275,7 @@ struct drm_sched_backend_ops {
  * @pending_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
- *              guilty and it will be considered for scheduling further.
+ *              guilty and it will no longer be considered for scheduling.
  * @score: score to help loadbalancer pick a idle sched
  * @_score: score used when the driver doesn't provide one
  * @ready: marks if the underlying HW is ready to work
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index f2a5f37c61b77..f681bbdbc6982 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -136,7 +136,7 @@ struct ttm_buffer_object {
 	 * Members protected by the bo::resv::reserved lock.
 	 */
 
-	struct ttm_resource mem;
+	struct ttm_resource *resource;
 	struct ttm_tt *ttm;
 	bool deleted;
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index dbccac957f8fd..68d6069572aa8 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -40,6 +40,7 @@
 #include <drm/ttm/ttm_device.h>
 
 #include "ttm_bo_api.h"
+#include "ttm_kmap_iter.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
 #include "ttm_pool.h"
@@ -96,7 +97,7 @@ struct ttm_lru_bulk_move {
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement,
-		     struct ttm_resource *mem,
+		     struct ttm_resource **mem,
 		     struct ttm_operation_ctx *ctx);
 
 /**
@@ -181,15 +182,15 @@ static inline void
 ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->lru_lock);
-	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
 	spin_unlock(&bo->bdev->lru_lock);
 }
 
 static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
 				     struct ttm_resource *new_mem)
 {
-	bo->mem = *new_mem;
-	new_mem->mm_node = NULL;
+	WARN_ON(bo->resource);
+	bo->resource = new_mem;
 }
 
 /**
@@ -202,9 +203,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
 static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
 				    struct ttm_resource *new_mem)
 {
-	struct ttm_resource *old_mem = &bo->mem;
-
-	WARN_ON(old_mem->mm_node != NULL);
+	ttm_resource_free(bo, &bo->resource);
 	ttm_bo_assign_mem(bo, new_mem);
 }
 
@@ -272,6 +271,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      bool pipeline,
 			      struct ttm_resource *new_mem);
 
+/**
+ * ttm_bo_move_sync_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
+static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+					    struct ttm_resource *new_mem)
+{
+	int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
+
+	WARN_ON(ret);
+}
+
 /**
  * ttm_bo_pipeline_gutting.
  *
@@ -306,30 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
  */
 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
 
-/**
- * ttm_range_man_init
- *
- * @bdev: ttm device
- * @type: memory manager type
- * @use_tt: if the memory manager uses tt
- * @p_size: size of area to be managed in pages.
- *
- * Initialise a generic range manager for the selected memory type.
- * The range manager is installed for this device in the type slot.
- */
-int ttm_range_man_init(struct ttm_device *bdev,
-		       unsigned type, bool use_tt,
-		       unsigned long p_size);
-
-/**
- * ttm_range_man_fini
- *
- * @bdev: ttm device
- * @type: memory manager type
- *
- * Remove the generic range manager from a slot and tear it down.
- */
-int ttm_range_man_fini(struct ttm_device *bdev,
-		       unsigned type);
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+		     u32 num_pages,
+		     struct ttm_kmap_iter *dst_iter,
+		     struct ttm_kmap_iter *src_iter);
 
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+			 struct io_mapping *iomap,
+			 struct sg_table *st,
+			 resource_size_t start);
 #endif
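
Of the helpers added here, ttm_bo_move_sync_cleanup() is the tail for synchronous (CPU) moves: after the copy the BO is idle by construction, so the accel-cleanup path runs with a NULL fence. A sketch of how the end of a memcpy-based driver move could look, assuming bo, new_mem and the iterators come from the surrounding move callback (iterator setup is shown in the examples further below):

	/* CPU copy between the old and new resource. */
	ttm_move_memcpy(bo, new_mem->num_pages, dst_iter, src_iter);

	/* BO is idle after the sync copy: NULL fence, no pipelining. */
	ttm_bo_move_sync_cleanup(bo, new_mem);
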
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index a0b4a49fa4328..3c9dd65f5aaff 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -33,4 +33,6 @@ enum ttm_caching {
 	ttm_cached
 };
 
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
 #endif
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 0000000000000..8bb00fd39d6ce
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct dma_buf_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed
+ * to mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+	/**
+	 * map_local() - Map a PAGE_SIZE part of the resource using
+	 * kmap_local semantics.
+	 * @res_iter: Pointer to the struct ttm_kmap_iter representing
+	 * the resource.
+	 * @dmap: The struct dma_buf_map holding the virtual address after
+	 * the operation.
+	 * @i: The location within the resource to map. PAGE_SIZE granularity.
+	 */
+	void (*map_local)(struct ttm_kmap_iter *res_iter,
+			  struct dma_buf_map *dmap, pgoff_t i);
+	/**
+	 * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+	 * mapped using kmap_local.
+	 * @res_iter: Pointer to the struct ttm_kmap_iter representing
+	 * the resource.
+	 * @dmap: The struct dma_buf_map holding the virtual address after
+	 * the operation.
+	 */
+	void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+			    struct dma_buf_map *dmap);
+	bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn etc.,
+ * replacing some or all parts of the ttm_bo_util cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+	const struct ttm_kmap_iter_ops *ops;
+};
+
+#endif /* __TTM_KMAP_ITER_H__ */
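
To make the ops contract concrete, here is a hedged sketch of a trivial iterator specialization over a contiguous kernel mapping; everything named my_* is illustrative, not an in-tree iterator:

	#include <linux/dma-buf-map.h>
	#include <drm/ttm/ttm_kmap_iter.h>

	struct my_kmap_iter {
		struct ttm_kmap_iter base;
		void *vaddr;	/* start of a contiguous mapping */
	};

	static void my_map_local(struct ttm_kmap_iter *iter,
				 struct dma_buf_map *dmap, pgoff_t i)
	{
		struct my_kmap_iter *it = container_of(iter, typeof(*it), base);

		/* "Mapping" page i is just an offset into the existing map. */
		dma_buf_map_set_vaddr(dmap, it->vaddr + i * PAGE_SIZE);
	}

	static void my_unmap_local(struct ttm_kmap_iter *iter,
				   struct dma_buf_map *dmap)
	{
		/* Nothing to undo for a permanent mapping. */
	}

	static const struct ttm_kmap_iter_ops my_kmap_iter_ops = {
		.map_local	= my_map_local,
		.unmap_local	= my_unmap_local,
		.maps_tt	= false,
	};
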
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 0000000000000..22b6fa42ac20f
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+	struct ttm_resource base;
+	struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+	return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init(struct ttm_device *bdev,
+		       unsigned type, bool use_tt,
+		       unsigned long p_size);
+int ttm_range_man_fini(struct ttm_device *bdev,
+		       unsigned type);
+
+#endif
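
The free side upcasts the generic resource back into the range-manager subclass with to_ttm_range_mgr_node(), exactly as vmw_thp_put_node() does earlier in this patch. A minimal hedged sketch (my_free is illustrative; drm_mm locking elided):

	#include <drm/ttm/ttm_range_manager.h>

	static void my_free(struct ttm_resource_manager *man,
			    struct ttm_resource *res)
	{
		struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);

		drm_mm_remove_node(&node->mm_nodes[0]);
		kfree(node);
	}
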
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 890b9d3695193..140b6b9a8bbe6 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -27,9 +27,11 @@
 
 #include <linux/types.h>
 #include <linux/mutex.h>
+#include <linux/dma-buf-map.h>
 #include <linux/dma-fence.h>
 #include <drm/drm_print.h>
 #include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
 
 #define TTM_MAX_BO_PRIORITY	4U
 
@@ -38,6 +40,10 @@ struct ttm_resource_manager;
 struct ttm_resource;
 struct ttm_place;
 struct ttm_buffer_object;
+struct dma_buf_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
 
 struct ttm_resource_manager_func {
 	/**
@@ -45,46 +51,38 @@ struct ttm_resource_manager_func {
 	 *
 	 * @man: Pointer to a memory type manager.
 	 * @bo: Pointer to the buffer object we're allocating space for.
-	 * @placement: Placement details.
-	 * @flags: Additional placement flags.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @place: Placement details.
+	 * @res: Resulting pointer to the ttm_resource.
 	 *
 	 * This function should allocate space in the memory type managed
-	 * by @man. Placement details if
-	 * applicable are given by @placement. If successful,
-	 * @mem::mm_node should be set to a non-null value, and
-	 * @mem::start should be set to a value identifying the beginning
+	 * by @man. Placement details if applicable are given by @place. If
+	 * successful, a filled-in ttm_resource object should be returned in
+	 * @res. @res::start should be set to a value identifying the beginning
 	 * of the range allocated, and the function should return zero.
-	 * If the memory region accommodate the buffer object, @mem::mm_node
-	 * should be set to NULL, and the function should return 0.
+	 * If the manager can't fulfill the request, -ENOSPC should be returned.
 	 * If a system error occurred, preventing the request to be fulfilled,
 	 * the function should return a negative error code.
 	 *
-	 * Note that @mem::mm_node will only be dereferenced by
-	 * struct ttm_resource_manager functions and optionally by the driver,
-	 * which has knowledge of the underlying type.
-	 *
-	 * This function may not be called from within atomic context, so
-	 * an implementation can and must use either a mutex or a spinlock to
-	 * protect any data structures managing the space.
+	 * This function may not be called from within atomic context and needs
+	 * to take care of its own locking to protect any data structures
+	 * managing the space.
 	 */
 	int  (*alloc)(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *mem);
+		      struct ttm_resource **res);
 
 	/**
 	 * struct ttm_resource_manager_func member free
 	 *
 	 * @man: Pointer to a memory type manager.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @res: Pointer to a struct ttm_resource to be freed.
 	 *
-	 * This function frees memory type resources previously allocated
-	 * and that are identified by @mem::mm_node and @mem::start. May not
-	 * be called from within atomic context.
+	 * This function frees memory type resources previously allocated.
+	 * May not be called from within atomic context.
 	 */
 	void (*free)(struct ttm_resource_manager *man,
-		     struct ttm_resource *mem);
+		     struct ttm_resource *res);
 
 	/**
 	 * struct ttm_resource_manager_func member debug
@@ -158,9 +156,9 @@ struct ttm_bus_placement {
 /**
  * struct ttm_resource
  *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
  *
@@ -168,7 +166,6 @@ struct ttm_bus_placement {
  * buffer object.
  */
 struct ttm_resource {
-	void *mm_node;
 	unsigned long start;
 	unsigned long num_pages;
 	uint32_t mem_type;
@@ -176,6 +173,45 @@ struct ttm_resource {
 	struct ttm_bus_placement bus;
 };
 
+/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+	struct ttm_kmap_iter base;
+	struct io_mapping *iomap;
+	struct sg_table *st;
+	resource_size_t start;
+	struct {
+		struct scatterlist *sg;
+		pgoff_t i;
+		pgoff_t end;
+		pgoff_t offs;
+	} cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+	struct ttm_kmap_iter base;
+	struct dma_buf_map dmap;
+	bool needs_unmap;
+};
+
 /**
  * ttm_resource_manager_set_used
  *
@@ -223,10 +259,13 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 	man->move = NULL;
 }
 
+void ttm_resource_init(struct ttm_buffer_object *bo,
+		       const struct ttm_place *place,
+		       struct ttm_resource *res);
 int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
-		       struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+		       struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
 
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
 			       unsigned long p_size);
@@ -237,4 +276,20 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 				struct drm_printer *p);
 
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+			 struct io_mapping *iomap,
+			 struct sg_table *st,
+			 resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+			     struct ttm_device *bdev,
+			     struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+				  struct ttm_device *bdev,
+				  struct ttm_resource *mem);
 #endif
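
The iomap iterator wraps VRAM-like memory that is described by a struct io_mapping plus an sg_table of addresses into that mapping. A hedged initialization sketch; vram_iomap, st and vram_base are illustrative:

	struct ttm_kmap_iter_iomap io_iter;
	struct ttm_kmap_iter *iter;

	/* @start rebases the sg_table DMA addresses so that
	 * sg_dma_address(st->sgl) - start == 0 at the iomap start.
	 */
	iter = ttm_kmap_iter_iomap_init(&io_iter, vram_iomap, st, vram_base);
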
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 134d09ef7766f..818680c6a8ed9 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -29,6 +29,7 @@
 
 #include <linux/types.h>
 #include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
 
 struct ttm_bo_device;
 struct ttm_tt;
@@ -69,6 +70,18 @@ struct ttm_tt {
 	enum ttm_caching caching;
 };
 
+/**
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
+ */
+struct ttm_kmap_iter_tt {
+	struct ttm_kmap_iter base;
+	struct ttm_tt *tt;
+	pgprot_t prot;
+};
+
 static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
 {
 	return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
@@ -157,8 +170,24 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
  */
 void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
 
+/**
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
+ */
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+	ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+}
+
 void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
 
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+					    struct ttm_tt *tt);
+
 #if IS_ENABLED(CONFIG_AGP)
 #include <linux/agp_backend.h>
 
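
Putting the iterator pieces together, a whole memcpy move from system (TT) pages to an I/O resource could look roughly like the hedged sketch below, using only declarations added by this series; bo and new_mem come from the surrounding move callback, and error handling is elided:

	struct ttm_kmap_iter_tt tt_iter;
	struct ttm_kmap_iter_linear_io io_iter;
	struct ttm_kmap_iter *src, *dst;

	src = ttm_kmap_iter_tt_init(&tt_iter, bo->ttm);
	dst = ttm_kmap_iter_linear_io_init(&io_iter, bo->bdev, new_mem);

	ttm_move_memcpy(bo, new_mem->num_pages, dst, src);

	ttm_kmap_iter_linear_io_fini(&io_iter, bo->bdev, new_mem);
	ttm_bo_move_sync_cleanup(bo, new_mem);
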
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d44a77e8a7e34..562b885cf9c3d 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -78,19 +78,11 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence,
-					 dma_resv_held(obj));
-}
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_shared_max(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+#endif
 
 /**
  * dma_resv_lock - lock the reservation object
@@ -215,38 +207,29 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-		fence->shared_max = fence->shared_count;
-	}
-#endif
+	dma_resv_reset_shared_max(obj);
 	ww_mutex_unlock(&obj->lock);
 }
 
 /**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_exclusive - return the object's exclusive fence
  * @obj: the reservation object
  *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). The caller must either hold the
+ * object locked through dma_resv_lock(), or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
  *
  * RETURNS
  * The exclusive fence or NULL
  */
 static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_excl_fence(struct dma_resv *obj)
 {
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
+	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
 /**
- * dma_resv_get_excl_rcu - get the reservation object's
+ * dma_resv_get_excl_unlocked - get the reservation object's
  * exclusive fence, without lock held.
  * @obj: the reservation object
  *
@@ -257,7 +240,7 @@ dma_resv_get_excl(struct dma_resv *obj)
  * The exclusive fence or NULL if none
  */
 static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
+dma_resv_get_excl_unlocked(struct dma_resv *obj)
 {
 	struct dma_fence *fence;
 
@@ -271,23 +254,29 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
 	return fence;
 }
 
+/**
+ * dma_resv_shared_list - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. The caller must either hold the object
+ * locked through dma_resv_lock(), or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
+ */
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+			unsigned *pshared_count, struct dma_fence ***pshared);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+			   unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
 
 #endif /* _LINUX_RESERVATION_H */
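
Since this patch renames the dma_resv accessors (dma_resv_get_excl() becomes dma_resv_excl_fence(), dma_resv_get_list() becomes dma_resv_shared_list(), and the _rcu wait/test/get_fences variants lose their suffix), a hedged sketch of a typical reader under the new names; resv is illustrative and error handling is elided:

	struct dma_fence *excl;
	long lret;

	dma_resv_lock(resv, NULL);

	/* Formerly dma_resv_get_excl(); no reference is taken, the
	 * pointer is only valid while the lock (or RCU) is held.
	 */
	excl = dma_resv_excl_fence(resv);
	if (excl)
		dma_fence_wait(excl, false);

	dma_resv_unlock(resv);

	/* Formerly dma_resv_wait_timeout_rcu(): wait on the exclusive
	 * fence and, with wait_all, on all shared fences as well.
	 */
	lret = dma_resv_wait_timeout(resv, true, true,
				     msecs_to_jiffies(100));
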
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ec19db1eead86..061e700dd06c3 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -171,6 +171,7 @@ enum drm_panfrost_param {
 	DRM_PANFROST_PARAM_JS_FEATURES15,
 	DRM_PANFROST_PARAM_NR_CORE_GROUPS,
 	DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
+	DRM_PANFROST_PARAM_AFBC_FEATURES,
 };
 
 struct drm_panfrost_get_param {