diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2e9ce53ae3a8c..9c0c650655e93 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -30,9 +30,11 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
+nouveau-y += nouveau_vmm.o
 
 # DRM - modesetting
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c02a13406a816..4b75ad40dd80e 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
 	help
 	  Selects the default debug level
 
+config NOUVEAU_DEBUG_MMU
+	bool "Enable additional MMU debugging"
+	depends on DRM_NOUVEAU
+	default n
+	help
+	  Say Y here if you want to enable verbose MMU debug output.
+
 config DRM_NOUVEAU_BACKLIGHT
 	bool "Support for backlight control"
 	depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 5b9d549aa791f..501d2d290e9c6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(&drm->client.device.object);
+	nvif_object_map(&drm->client.device.object, NULL, 0);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index aa94b8cf9679e..f508660110029 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -5,7 +5,7 @@ struct nv50_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 3b7101966de43..0e5bbb5531583 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -8,6 +8,6 @@ struct nv50_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 91e33db21a2fe..7f6a8ce5a418d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -5,7 +5,7 @@ struct g82_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index e34efd4ec5371..c4d35522331a4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -8,7 +8,7 @@ struct g82_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV826F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index a2d5410a491b8..169161c1587f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -7,7 +7,7 @@ struct fermi_channel_gpfifo_v0 {
 	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV906F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 2efa3d048bb9d..3e57089526e3a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -22,7 +22,7 @@ struct kepler_channel_gpfifo_a_v0 {
 	__u32 engines;
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index d08da82ba7ed4..56aade45067d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -14,6 +14,23 @@
 #define NVIF_CLASS_SW_NV50                           /* if0005.h */ -0x00000006
 #define NVIF_CLASS_SW_GF100                          /* if0005.h */ -0x00000007
 
+#define NVIF_CLASS_MMU                               /* if0008.h */  0x80000008
+#define NVIF_CLASS_MMU_NV04                          /* if0008.h */  0x80000009
+#define NVIF_CLASS_MMU_NV50                          /* if0008.h */  0x80005009
+#define NVIF_CLASS_MMU_GF100                         /* if0008.h */  0x80009009
+
+#define NVIF_CLASS_MEM                               /* if000a.h */  0x8000000a
+#define NVIF_CLASS_MEM_NV04                          /* if000b.h */  0x8000000b
+#define NVIF_CLASS_MEM_NV50                          /* if500b.h */  0x8000500b
+#define NVIF_CLASS_MEM_GF100                         /* if900b.h */  0x8000900b
+
+#define NVIF_CLASS_VMM                               /* if000c.h */  0x8000000c
+#define NVIF_CLASS_VMM_NV04                          /* if000d.h */  0x8000000d
+#define NVIF_CLASS_VMM_NV50                          /* if500d.h */  0x8000500d
+#define NVIF_CLASS_VMM_GF100                         /* if900d.h */  0x8000900d
+#define NVIF_CLASS_VMM_GM200                         /* ifb00d.h */  0x8000b00d
+#define NVIF_CLASS_VMM_GP100                         /* ifc00d.h */  0x8000c00d
+
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
 #define NV_NULL_CLASS                                                0x00000030
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index bcb9817116177..b579633b80c0d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -38,7 +38,6 @@ u64  nvif_device_time(struct nvif_device *);
 /*XXX*/
 #include <subdev/bios.h>
 #include <subdev/fb.h>
-#include <subdev/mmu.h>
 #include <subdev/bar.h>
 #include <subdev/gpio.h>
 #include <subdev/clk.h>
@@ -57,8 +56,6 @@ u64  nvif_device_time(struct nvif_device *);
 })
 #define nvxx_bios(a) nvxx_device(a)->bios
 #define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_mmu(a) nvxx_device(a)->mmu
-#define nvxx_bar(a) nvxx_device(a)->bar
 #define nvxx_gpio(a) nvxx_device(a)->gpio
 #define nvxx_clk(a) nvxx_device(a)->clk
 #define nvxx_i2c(a) nvxx_device(a)->i2c
@@ -66,10 +63,8 @@ u64  nvif_device_time(struct nvif_device *);
 #define nvxx_therm(a) nvxx_device(a)->therm
 #define nvxx_volt(a) nvxx_device(a)->volt
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
-#include <engine/sw.h>
 
 #define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvxx_device(a)->gr
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
new file mode 100644
index 0000000000000..8450127420f55
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_IF0008_H__
+#define __NVIF_IF0008_H__
+struct nvif_mmu_v0 {
+	__u8  version;
+	__u8  dmabits;
+	__u8  heap_nr;
+	__u8  type_nr;
+	__u16 kind_nr;
+};
+
+#define NVIF_MMU_V0_HEAP                                                   0x00
+#define NVIF_MMU_V0_TYPE                                                   0x01
+#define NVIF_MMU_V0_KIND                                                   0x02
+
+struct nvif_mmu_heap_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  pad02[6];
+	__u64 size;
+};
+
+struct nvif_mmu_type_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  heap;
+	__u8  vram;
+	__u8  host;
+	__u8  comp;
+	__u8  disp;
+	__u8  kind;
+	__u8  mappable;
+	__u8  coherent;
+	__u8  uncached;
+};
+
+struct nvif_mmu_kind_v0 {
+	__u8  version;
+	__u8  pad01[1];
+	__u16 count;
+	__u8  data[];
+};
+#endif
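For illustration only (not part of the patch): the HEAP/TYPE/KIND methods above are ordinary object methods, so a client can walk the type table with nvif_object_mthd() in much the same way the nvif_mmu_init() wrapper added by this series does. The function name here is hypothetical; type_nr comes from the nvif_mmu_v0 reply at object creation.

static int
example_enum_types(struct nvif_object *mmu, u8 type_nr)
{
	int ret, i;

	for (i = 0; i < type_nr; i++) {
		struct nvif_mmu_type_v0 args = { .index = i };

		ret = nvif_object_mthd(mmu, NVIF_MMU_V0_TYPE,
				       &args, sizeof(args));
		if (ret)
			return ret;
		/* args.vram/host/comp/etc. now describe type 'i'. */
	}
	return 0;
}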
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000a.h b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
new file mode 100644
index 0000000000000..88d0938fbd5ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
@@ -0,0 +1,22 @@
+#ifndef __NVIF_IF000A_H__
+#define __NVIF_IF000A_H__
+struct nvif_mem_v0 {
+	__u8  version;
+	__u8  type;
+	__u8  page;
+	__u8  pad03[5];
+	__u64 size;
+	__u64 addr;
+	__u8  data[];
+};
+
+struct nvif_mem_ram_vn {
+};
+
+struct nvif_mem_ram_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	dma_addr_t *dma;
+	struct scatterlist *sgl;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000b.h b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
new file mode 100644
index 0000000000000..c677fb0293da8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
@@ -0,0 +1,11 @@
+#ifndef __NVIF_IF000B_H__
+#define __NVIF_IF000B_H__
+#include "if000a.h"
+
+struct nv04_mem_vn {
+	/* nvkm_mem_vX ... */
+};
+
+struct nv04_mem_map_vn {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
new file mode 100644
index 0000000000000..2928ecd989ad5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -0,0 +1,64 @@
+#ifndef __NVIF_IF000C_H__
+#define __NVIF_IF000C_H__
+struct nvif_vmm_v0 {
+	__u8  version;
+	__u8  page_nr;
+	__u8  pad02[6];
+	__u64 addr;
+	__u64 size;
+	__u8  data[];
+};
+
+#define NVIF_VMM_V0_PAGE                                                   0x00
+#define NVIF_VMM_V0_GET                                                    0x01
+#define NVIF_VMM_V0_PUT                                                    0x02
+#define NVIF_VMM_V0_MAP                                                    0x03
+#define NVIF_VMM_V0_UNMAP                                                  0x04
+
+struct nvif_vmm_page_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  shift;
+	__u8  sparse;
+	__u8  vram;
+	__u8  host;
+	__u8  comp;
+	__u8  pad07[1];
+};
+
+struct nvif_vmm_get_v0 {
+	__u8  version;
+#define NVIF_VMM_GET_V0_ADDR                                               0x00
+#define NVIF_VMM_GET_V0_PTES                                               0x01
+#define NVIF_VMM_GET_V0_LAZY                                               0x02
+	__u8  type;
+	__u8  sparse;
+	__u8  page;
+	__u8  align;
+	__u8  pad05[3];
+	__u64 size;
+	__u64 addr;
+};
+
+struct nvif_vmm_put_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+};
+
+struct nvif_vmm_map_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+	__u64 size;
+	__u64 memory;
+	__u64 offset;
+	__u8  data[];
+};
+
+struct nvif_vmm_unmap_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000d.h b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
new file mode 100644
index 0000000000000..516ec9401401a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
@@ -0,0 +1,12 @@
+#ifndef __NVIF_IF000D_H__
+#define __NVIF_IF000D_H__
+#include "if000c.h"
+
+struct nv04_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct nv04_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500b.h b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
new file mode 100644
index 0000000000000..c7c8431fb2ced
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
@@ -0,0 +1,25 @@
+#ifndef __NVIF_IF500B_H__
+#define __NVIF_IF500B_H__
+#include "if000a.h"
+
+struct nv50_mem_vn {
+	/* nvif_mem_vX ... */
+};
+
+struct nv50_mem_v0 {
+	/* nvif_mem_vX ... */
+	__u8  version;
+	__u8  bankswz;
+	__u8  contig;
+};
+
+struct nv50_mem_map_vn {
+};
+
+struct nv50_mem_map_v0 {
+	__u8  version;
+	__u8  ro;
+	__u8  kind;
+	__u8  comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500d.h b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
new file mode 100644
index 0000000000000..c29a7822b3638
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF500D_H__
+#define __NVIF_IF500D_H__
+#include "if000c.h"
+
+struct nv50_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct nv50_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct nv50_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+	__u8  comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900b.h b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
new file mode 100644
index 0000000000000..9b164548eea85
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
@@ -0,0 +1,23 @@
+#ifndef __NVIF_IF900B_H__
+#define __NVIF_IF900B_H__
+#include "if000a.h"
+
+struct gf100_mem_vn {
+	/* nvif_mem_vX ... */
+};
+
+struct gf100_mem_v0 {
+	/* nvif_mem_vX ... */
+	__u8  version;
+	__u8  contig;
+};
+
+struct gf100_mem_map_vn {
+};
+
+struct gf100_mem_map_v0 {
+	__u8  version;
+	__u8  ro;
+	__u8  kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900d.h b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
new file mode 100644
index 0000000000000..49aa50583c3dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF900D_H__
+#define __NVIF_IF900D_H__
+#include "if000c.h"
+
+struct gf100_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gf100_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gf100_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
new file mode 100644
index 0000000000000..a0e4198305954
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
@@ -0,0 +1,27 @@
+#ifndef __NVIF_IFB00D_H__
+#define __NVIF_IFB00D_H__
+#include "if000c.h"
+
+struct gm200_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gm200_vmm_v0 {
+	/* nvif_vmm_vX ... */
+	__u8  version;
+	__u8  bigpage;
+};
+
+struct gm200_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gm200_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
new file mode 100644
index 0000000000000..1d9c637859f3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IFC00D_H__
+#define __NVIF_IFC00D_H__
+#include "if000c.h"
+
+struct gp100_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gp100_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gp100_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index c5f5eb83a5944..1886366457f18 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,7 +1,7 @@
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
-#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+#define NVIF_VERSION_LATEST                               0x0000000000000100ULL
 
 struct nvif_ioctl_v0 {
 	__u8  version;
@@ -83,9 +83,13 @@ struct nvif_ioctl_wr_v0 {
 struct nvif_ioctl_map_v0 {
 	/* nvif_ioctl ... */
 	__u8  version;
-	__u8  pad01[3];
-	__u32 length;
+#define NVIF_IOCTL_MAP_V0_IO                                               0x00
+#define NVIF_IOCTL_MAP_V0_VA                                               0x01
+	__u8  type;
+	__u8  pad02[6];
 	__u64 handle;
+	__u64 length;
+	__u8  data[];
 };
 
 struct nvif_ioctl_unmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
new file mode 100644
index 0000000000000..b542fe38398ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_MEM_H__
+#define __NVIF_MEM_H__
+#include "mmu.h"
+
+struct nvif_mem {
+	struct nvif_object object;
+	u8  type;
+	u8  page;
+	u64 addr;
+	u64 size;
+};
+
+int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+		       u64 size, void *argv, u32 argc, struct nvif_mem *);
+int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+		  u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_fini(struct nvif_mem *);
+#endif
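A usage sketch (assumptions: a valid nvif_mmu, and a backend that accepts empty allocation args): the type argument is an NVIF_MEM_* mask, and nvif_mem_init() tries each type index whose flags include every requested bit, handing the chosen index to nvif_mem_init_type().

	struct nvif_mem mem;
	int ret;

	/* 64KiB of mappable host memory; page 0 lets the backend choose. */
	ret = nvif_mem_init(mmu, NVIF_CLASS_MEM_GF100,
			    NVIF_MEM_HOST | NVIF_MEM_MAPPABLE, 0,
			    0x10000, NULL, 0, &mem);
	if (ret == 0) {
		/* mem.addr/mem.size/mem.page describe the allocation. */
		nvif_mem_fini(&mem);
	}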
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
new file mode 100644
index 0000000000000..c8cd5b5b06889
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -0,0 +1,56 @@
+#ifndef __NVIF_MMU_H__
+#define __NVIF_MMU_H__
+#include <nvif/object.h>
+
+struct nvif_mmu {
+	struct nvif_object object;
+	u8  dmabits;
+	u8  heap_nr;
+	u8  type_nr;
+	u16 kind_nr;
+
+	struct {
+		u64 size;
+	} *heap;
+
+	struct {
+#define NVIF_MEM_VRAM                                                      0x01
+#define NVIF_MEM_HOST                                                      0x02
+#define NVIF_MEM_COMP                                                      0x04
+#define NVIF_MEM_DISP                                                      0x08
+#define NVIF_MEM_KIND                                                      0x10
+#define NVIF_MEM_MAPPABLE                                                  0x20
+#define NVIF_MEM_COHERENT                                                  0x40
+#define NVIF_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} *type;
+
+	u8 *kind;
+};
+
+int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
+void nvif_mmu_fini(struct nvif_mmu *);
+
+static inline bool
+nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
+{
+	const u8 invalid = mmu->kind_nr - 1;
+	if (kind) {
+		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+			return false;
+	}
+	return true;
+}
+
+static inline int
+nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
+{
+	int i;
+	for (i = 0; i < mmu->type_nr; i++) {
+		if ((mmu->type[i].type & mask) == mask)
+			return i;
+	}
+	return -EINVAL;
+}
+#endif
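Note the matching rule in nvif_mmu_type(): a type is only chosen if every bit of the mask is set for it, so callers ask for the full set of properties they need. For example, to find a host-visible, cache-coherent type (returns -EINVAL if the chipset exposes none):

	int type = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
				      NVIF_MEM_COHERENT);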
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 9e58b305b0203..0b54261bdefe5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -16,7 +16,7 @@ struct nvif_object {
 	void *priv; /*XXX: hack */
 	struct {
 		void __iomem *ptr;
-		u32 size;
+		u64 size;
 	} map;
 };
 
@@ -29,7 +29,10 @@ void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
-int  nvif_object_map(struct nvif_object *);
+int  nvif_object_map_handle(struct nvif_object *, void *, u32,
+			    u64 *handle, u64 *length);
+void nvif_object_unmap_handle(struct nvif_object *);
+int  nvif_object_map(struct nvif_object *, void *, u32);
 void nvif_object_unmap(struct nvif_object *);
 
 #define nvif_handle(a) (unsigned long)(void *)(a)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 9fcab67c85577..5efdf80d5abcb 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -33,18 +33,4 @@
 
 #include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
-
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native  ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native  ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
new file mode 100644
index 0000000000000..c5db8a2e82df8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_VMM_H__
+#define __NVIF_VMM_H__
+#include <nvif/object.h>
+struct nvif_mem;
+struct nvif_mmu;
+
+enum nvif_vmm_get {
+	ADDR,
+	PTES,
+	LAZY
+};
+
+struct nvif_vma {
+	u64 addr;
+	u64 size;
+};
+
+struct nvif_vmm {
+	struct nvif_object object;
+	u64 start;
+	u64 limit;
+
+	struct {
+		u8 shift;
+		bool sparse:1;
+		bool vram:1;
+		bool host:1;
+		bool comp:1;
+	} *page;
+	int page_nr;
+};
+
+int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
+		  void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
+		 u8 page, u8 align, u64 size, struct nvif_vma *);
+void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
+int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
+		 struct nvif_mem *, u64 offset);
+int nvif_vmm_unmap(struct nvif_vmm *, u64);
+#endif
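A sketch of the intended get/map/put flow (hedged: real callers pass backend-specific map args via argv/argc; an empty set is assumed acceptable here, as with the *_vn structures):

static int
example_vma_bind(struct nvif_vmm *vmm, struct nvif_mem *mem)
{
	struct nvif_vma vma;
	int ret;

	/* LAZY: page tables are referenced at map time, not at get. */
	ret = nvif_vmm_get(vmm, LAZY, false, mem->page, 0, mem->size, &vma);
	if (ret)
		return ret;

	ret = nvif_vmm_map(vmm, vma.addr, vma.size, NULL, 0, mem, 0);
	if (ret)
		nvif_vmm_put(vmm, &vma);
	return ret;
}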
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index e876634da10a1..79624f6d0a2b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -16,7 +16,8 @@ struct nvkm_client {
 	void *data;
 	int (*ntfy)(const void *, u32, const void *, u32);
 
-	struct nvkm_vm *vm;
+	struct list_head umem;
+	spinlock_t lock;
 };
 
 int  nvkm_client_new(const char *name, u64 device, const char *cfg,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index bb4c214f10466..5046e1db99acd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,7 +1,7 @@
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
+#include <core/oclass.h>
 #include <core/event.h>
-#include <core/object.h>
 
 enum nvkm_devidx {
 	NVKM_SUBDEV_PCI,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index d4cd2fbfde885..7730499bfd958 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -15,6 +15,7 @@ struct nvkm_engine {
 
 struct nvkm_engine_func {
 	void *(*dtor)(struct nvkm_engine *);
+	void (*preinit)(struct nvkm_engine *);
 	int (*oneinit)(struct nvkm_engine *);
 	int (*init)(struct nvkm_engine *);
 	int (*fini)(struct nvkm_engine *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index c23da4f059290..51691667b8130 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,17 +1,16 @@
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
-#include <core/object.h>
 #include <core/memory.h>
 #include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
-	struct nvkm_object object;
-	const struct nvkm_gpuobj_func *func;
+	union {
+		const struct nvkm_gpuobj_func *func;
+		const struct nvkm_gpuobj_func *ptrs;
+	};
 	struct nvkm_gpuobj *parent;
 	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
@@ -28,15 +27,14 @@ struct nvkm_gpuobj_func {
 	void (*release)(struct nvkm_gpuobj *);
 	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
 	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+	int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
 };
 
 int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
 		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
 void nvkm_gpuobj_del(struct nvkm_gpuobj **);
 int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-		    struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
 void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
 			   u32 length);
 void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 33ca6769266a5..13ebf4da2b963 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -3,7 +3,12 @@
 #include <core/os.h>
 struct nvkm_device;
 struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
+
+struct nvkm_tags {
+	struct nvkm_mm_node *mn;
+	refcount_t refcount;
+};
 
 enum nvkm_memory_target {
 	NVKM_MEM_TARGET_INST, /* instance memory */
@@ -14,41 +19,84 @@ enum nvkm_memory_target {
 
 struct nvkm_memory {
 	const struct nvkm_memory_func *func;
+	const struct nvkm_memory_ptrs *ptrs;
+	struct kref kref;
+	struct nvkm_tags *tags;
 };
 
 struct nvkm_memory_func {
 	void *(*dtor)(struct nvkm_memory *);
 	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u8 (*page)(struct nvkm_memory *);
 	u64 (*addr)(struct nvkm_memory *);
 	u64 (*size)(struct nvkm_memory *);
-	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
+	int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
+};
+
+struct nvkm_memory_ptrs {
 	u32 (*rd32)(struct nvkm_memory *, u64 offset);
 	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
 int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
 		    u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
+int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
+			 void (*clear)(struct nvkm_device *, u32, u32),
+			 struct nvkm_tags **);
+void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
+			  struct nvkm_tags **);
+
 #define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_page(p) (p)->func->page(p)
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac)                                       \
+	(p)->func->map((p),(o),(vm),(va),(av),(ac))
 
 /* accessor macros - kmap()/done() must bracket use of the other accessor
  * macros to guarantee correct behaviour across all chipsets
  */
 #define nvkm_kmap(o)     (o)->func->acquire(o)
-#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_done(o)     (o)->func->release(o)
+
+#define nvkm_ro32(o,a)   (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
 #define nvkm_mo32(o,a,m,d) ({                                                  \
 	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
 	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
 	_data;                                                                 \
 })
-#define nvkm_done(o)     (o)->func->release(o)
+
+#define nvkm_wo64(o,a,d) do {                                                  \
+	u64 __a = (a), __d = (d);                                              \
+	nvkm_wo32((o), __a + 0, lower_32_bits(__d));                           \
+	nvkm_wo32((o), __a + 4, upper_32_bits(__d));                           \
+} while(0)
+
+#define nvkm_fill(t,s,o,a,d,c) do {                                            \
+	u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s;          \
+	u##t __iomem *_m = nvkm_kmap(o);                                       \
+	if (likely(_m)) {                                                      \
+		if (_d) {                                                      \
+			while (_c--)                                           \
+				iowrite##t##_native(_d, &_m[_o++]);            \
+		} else {                                                       \
+			memset_io(&_m[_o], _d, _s);                            \
+		}                                                              \
+	} else {                                                               \
+		for (; _c; _c--, _a += BIT(s))                                 \
+			nvkm_wo##t((o), _a, _d);                               \
+	}                                                                      \
+	nvkm_done(o);                                                          \
+} while(0)
+#define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c))
+#define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c))
 #endif
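The fill macros take a count in units of the access size and fall back to per-word nvkm_wo32()/nvkm_wo64() when the memory cannot be kmap'd; with a mapping, a zero fill collapses into a single memset_io(). Illustrative use:

	/* Clear the first 0x1000 bytes of an object: 0x400 dwords. */
	nvkm_fo32(memory, 0x0000, 0x00000000, 0x400);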
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 7bd4897a8a2ad..5c1261351138b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -30,7 +30,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
 	return mm->heap_nodes;
 }
 
-int  nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int  nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
 int  nvkm_mm_fini(struct nvkm_mm *);
 int  nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		  u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -39,9 +39,39 @@ int  nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
 
+static inline u32
+nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
+{
+	struct nvkm_mm_node *node;
+	u32 size = 0;
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		if (node->heap == heap)
+			size += node->length;
+	}
+	return size;
+}
+
 static inline bool
 nvkm_mm_contiguous(struct nvkm_mm_node *node)
 {
 	return !node->next;
 }
+
+static inline u32
+nvkm_mm_addr(struct nvkm_mm_node *node)
+{
+	if (WARN_ON(!nvkm_mm_contiguous(node)))
+		return 0;
+	return node->offset;
+}
+
+static inline u32
+nvkm_mm_size(struct nvkm_mm_node *node)
+{
+	u32 size = 0;
+	do {
+		size += node->length;
+	} while ((node = node->next));
+	return size;
+}
 #endif
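nvkm_mm_init() now tags the initial block of nodes with a heap identifier, and the helpers above distinguish contiguous from chained allocations. Illustrative (mm and node are hypothetical):

	u32 heap_total = nvkm_mm_heap_size(mm, 1); /* sum of heap-1 nodes */
	u32 alloc_size = nvkm_mm_size(node);       /* walks the ->next chain */
	u32 alloc_addr = nvkm_mm_addr(node);       /* WARNs unless one node */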
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 96dda350ada31..916a4b76d430b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,10 +1,8 @@
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
-#include <core/os.h>
-#include <core/debug.h>
+#include <core/oclass.h>
 struct nvkm_event;
 struct nvkm_gpuobj;
-struct nvkm_oclass;
 
 struct nvkm_object {
 	const struct nvkm_object_func *func;
@@ -21,13 +19,20 @@ struct nvkm_object {
 	struct rb_node node;
 };
 
+enum nvkm_object_map {
+	NVKM_OBJECT_MAP_IO,
+	NVKM_OBJECT_MAP_VA
+};
+
 struct nvkm_object_func {
 	void *(*dtor)(struct nvkm_object *);
 	int (*init)(struct nvkm_object *);
 	int (*fini)(struct nvkm_object *, bool suspend);
 	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
 	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*map)(struct nvkm_object *, void *argv, u32 argc,
+		   enum nvkm_object_map *, u64 *addr, u64 *size);
+	int (*unmap)(struct nvkm_object *);
 	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
 	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
 	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
@@ -52,7 +57,9 @@ int nvkm_object_init(struct nvkm_object *);
 int nvkm_object_fini(struct nvkm_object *, bool suspend);
 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+		    enum nvkm_object_map *, u64 *addr, u64 *size);
+int nvkm_object_unmap(struct nvkm_object *);
 int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8  *data);
 int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
 int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
@@ -66,28 +73,4 @@ bool nvkm_object_insert(struct nvkm_object *);
 void nvkm_object_remove(struct nvkm_object *);
 struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
 				       const struct nvkm_object_func *);
-
-struct nvkm_sclass {
-	int minver;
-	int maxver;
-	s32 oclass;
-	const struct nvkm_object_func *func;
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-};
-
-struct nvkm_oclass {
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-	struct nvkm_sclass base;
-	const void *priv;
-	const void *engn;
-	u32 handle;
-	u8  route;
-	u64 token;
-	u64 object;
-	struct nvkm_client *client;
-	struct nvkm_object *parent;
-	struct nvkm_engine *engine;
-};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
new file mode 100644
index 0000000000000..8e1b945d38f38
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_OCLASS_H__
+#define __NVKM_OCLASS_H__
+#include <core/os.h>
+#include <core/debug.h>
+struct nvkm_oclass;
+struct nvkm_object;
+
+struct nvkm_sclass {
+	int minver;
+	int maxver;
+	s32 oclass;
+	const struct nvkm_object_func *func;
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const void *priv;
+	const void *engn;
+	u32 handle;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct nvkm_client *client;
+	struct nvkm_object *parent;
+	struct nvkm_engine *engine;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index cd57e238ddd34..1f0108fdd24af 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -1,4 +1,23 @@
 #ifndef __NVKM_OS_H__
 #define __NVKM_OS_H__
 #include <nvif/os.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native  ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native  ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do {                                             \
+	u32 __iomem *_p = (u32 __iomem *)(p);				       \
+	u64 _v = (v);							       \
+	iowrite32_native(lower_32_bits(_v), &_p[0]);			       \
+	iowrite32_native(upper_32_bits(_v), &_p[1]);			       \
+} while(0)
 #endif
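iowrite64_native() is just two native-endian 32-bit writes, low half at the lower address, which is how 8-byte PTEs get written out. A sketch with hypothetical names:

static void
example_write_pte(u32 __iomem *map, u32 ptei, u64 pte)
{
	/* low word lands at +0, high word at +4 */
	iowrite64_native(pte, &map[ptei * 2]);
}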
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index 5ee6298991e21..8a48ca67f60db 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_RAMHT_H__
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
+struct nvkm_object;
 
 struct nvkm_ramht_data {
 	struct nvkm_gpuobj *inst;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index ca9ed3d68f44e..a6c21be7537f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -33,7 +33,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
 /* subdev logging */
 #define nvkm_printk_(s,l,p,f,a...) do {                                        \
 	const struct nvkm_subdev *_subdev = (s);                               \
-	if (_subdev->debug >= (l)) {                                           \
+	if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) {            \
 		dev_##p(_subdev->device->dev, "%s: "f,                         \
 			nvkm_subdev_name[_subdev->index], ##a);                \
 	}                                                                      \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index d2a6532ce3b99..b672a3b07f554 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_DMA_H__
 #define __NVKM_DMA_H__
 #include <core/engine.h>
+#include <core/object.h>
 struct nvkm_client;
 
 struct nvkm_dmaobj {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index e1a854e2ade1a..f0024fb5a5aff 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -3,6 +3,7 @@
 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
 struct nvkm_fifo_chan;
+struct nvkm_gpuobj;
 
 enum nvkm_falcon_dmaidx {
 	FALCON_DMAIDX_UCODE		= 0,
@@ -77,7 +78,7 @@ struct nvkm_falcon_func {
 	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
 	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
 	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
-	void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+	void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
 	int (*wait_for_halt)(struct nvkm_falcon *, u32);
 	int (*clear_interrupt)(struct nvkm_falcon *, u32);
 	void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -112,7 +113,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
 			   bool);
 void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
 void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
 void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
 void nvkm_falcon_start(struct nvkm_falcon *);
 int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index f00527b36accf..e42d686fbd8b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
 #include <core/engine.h>
+#include <core/object.h>
 #include <core/event.h>
 
 #define NVKM_FIFO_CHID_NR 4096
@@ -21,7 +22,7 @@ struct nvkm_fifo_chan {
 	u16 chid;
 	struct nvkm_gpuobj *inst;
 	struct nvkm_gpuobj *push;
-	struct nvkm_vm *vm;
+	struct nvkm_vmm *vmm;
 	void __iomem *user;
 	u64 addr;
 	u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index d3071b5a4f982..ffa963939e15b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -8,17 +8,22 @@ struct nvkm_bar {
 	struct nvkm_subdev subdev;
 
 	spinlock_t lock;
+	bool bar2;
 
 	/* whether the BAR supports to be ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar2_init(struct nvkm_device *);
+void nvkm_bar_bar2_fini(struct nvkm_device *);
+struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
-struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
 int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 28d513fbf44c3..a00fd2e59215c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -1,8 +1,7 @@
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
-
-#include <subdev/mmu.h>
+#include <core/mm.h>
 
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
@@ -21,22 +20,6 @@
 #define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
-struct nvkm_mem {
-	struct drm_device *dev;
-
-	struct nvkm_vma bar_vma;
-	struct nvkm_vma vma[2];
-	u8  page_shift;
-
-	struct nvkm_mm_node *tag;
-	struct nvkm_mm_node *mem;
-	dma_addr_t *pages;
-	u32 memtype;
-	u64 offset;
-	u64 size;
-	struct sg_table *sg;
-};
-
 struct nvkm_fb_tile {
 	struct nvkm_mm_node *tag;
 	u32 addr;
@@ -50,6 +33,7 @@ struct nvkm_fb {
 	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
+	struct nvkm_mm tags;
 
 	struct {
 		struct nvkm_fb_tile region[16];
@@ -62,7 +46,6 @@ struct nvkm_fb {
 	struct nvkm_memory *mmu_wr;
 };
 
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
 void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -129,8 +112,11 @@ struct nvkm_ram {
 	u64 size;
 
 #define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY    (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP  (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
-	struct nvkm_mm tags;
 	u64 stolen;
 
 	int ranks;
@@ -147,6 +133,10 @@ struct nvkm_ram {
 	struct nvkm_ram_data target;
 };
 
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+	     bool contig, bool back, struct nvkm_memory **);
+
 struct nvkm_ram_func {
 	u64 upper;
 	u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
@@ -157,14 +147,8 @@ struct nvkm_ram_func {
 	void *(*dtor)(struct nvkm_ram *);
 	int (*init)(struct nvkm_ram *);
 
-	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
-		   u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
-
 	int (*calc)(struct nvkm_ram *, u32 freq);
 	int (*prog)(struct nvkm_ram *);
 	void (*tidy)(struct nvkm_ram *);
 };
-
-extern const u8 gf100_pte_storage_type_map[256];
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 40f845e312723..8111c0c3c5ecd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -9,6 +9,7 @@ struct nvkm_instmem {
 
 	spinlock_t lock;
 	struct list_head list;
+	struct list_head boot;
 	u32 reserved;
 
 	struct nvkm_memory *vbios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd755baf9cab7..4a224fd22e48b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -14,8 +14,7 @@ struct nvkm_ltc {
 
 	u32 num_tags;
 	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
+	struct nvkm_memory *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
@@ -23,9 +22,7 @@ struct nvkm_ltc {
 	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
-void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
 
 int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
 int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index dcd3deff27a49..975c42f620a08 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -1,68 +1,130 @@
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
-#include <core/mm.h>
-struct nvkm_device;
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
-	struct nvkm_memory *mem[2];
-	u32 refcount[2];
-};
-
-struct nvkm_vm_pgd {
-	struct list_head head;
-	struct nvkm_gpuobj *obj;
-};
 
 struct nvkm_vma {
 	struct list_head head;
-	int refcount;
-	struct nvkm_vm *vm;
-	struct nvkm_mm_node *node;
-	u64 offset;
-	u32 access;
+	struct rb_node tree;
+	u64 addr;
+	u64 size:50;
+	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+	u8   page:3; /* Requested page type (index, or NONE for automatic). */
+	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
+	bool used:1; /* Region allocated. */
+	bool part:1; /* Region was split from an allocated region by map(). */
+	bool user:1; /* Region user-allocated. */
+	bool busy:1; /* Region busy (for temporarily preventing user access). */
+	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+	struct nvkm_tags *tags; /* Compression tag reference. */
 };
 
-struct nvkm_vm {
+struct nvkm_vmm {
+	const struct nvkm_vmm_func *func;
 	struct nvkm_mmu *mmu;
-
+	const char *name;
+	u32 debug;
+	struct kref kref;
 	struct mutex mutex;
-	struct nvkm_mm mm;
-	struct kref refcount;
 
-	struct list_head pgd_list;
+	u64 start;
+	u64 limit;
+
+	struct nvkm_vmm_pt *pd;
+	struct list_head join;
+
+	struct list_head list;
+	struct rb_root free;
+	struct rb_root root;
+
+	bool bootstrapped;
 	atomic_t engref[NVKM_SUBDEV_NR];
 
-	struct nvkm_vm_pgt *pgt;
-	u32 fpde;
-	u32 lpde;
+	dma_addr_t null;
+	void *nullp;
 };
 
-int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct lock_class_key *, struct nvkm_vm **);
-int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
-int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
-		 struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
+int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
+
+struct nvkm_vmm_map {
+	struct nvkm_memory *memory;
+	u64 offset;
+
+	struct nvkm_mm_node *mem;
+	struct scatterlist *sgl;
+	dma_addr_t *dma;
+	u64 off;
+
+	const struct nvkm_vmm_page *page;
+
+	struct nvkm_tags *tags;
+	u64 next;
+	u64 type;
+	u64 ctag;
+};
+
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+		 struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+
+struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
 
 struct nvkm_mmu {
 	const struct nvkm_mmu_func *func;
 	struct nvkm_subdev subdev;
 
-	u64 limit;
 	u8  dma_bits;
-	u8  lpg_shift;
+
+	int heap_nr;
+	struct {
+#define NVKM_MEM_VRAM                                                      0x01
+#define NVKM_MEM_HOST                                                      0x02
+#define NVKM_MEM_COMP                                                      0x04
+#define NVKM_MEM_DISP                                                      0x08
+		u8  type;
+		u64 size;
+	} heap[4];
+
+	int type_nr;
+	struct {
+#define NVKM_MEM_KIND                                                      0x10
+#define NVKM_MEM_MAPPABLE                                                  0x20
+#define NVKM_MEM_COHERENT                                                  0x40
+#define NVKM_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} type[16];
+
+	struct nvkm_vmm *vmm;
+
+	struct {
+		struct mutex mutex;
+		struct list_head list;
+	} ptc, ptp;
+
+	struct nvkm_device_oclass user;
 };
 
 int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
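A kernel-side sketch of the new allocate-then-map path (assumptions: the page argument to nvkm_vmm_get() is a page shift, and empty map args suit the backend; error handling trimmed):

static int
example_kmap(struct nvkm_vmm *vmm, struct nvkm_memory *memory)
{
	struct nvkm_vma *vma;
	int ret;

	ret = nvkm_vmm_get(vmm, 12, nvkm_memory_size(memory), &vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(memory, 0, vmm, vma, NULL, 0);
	if (ret)
		nvkm_vmm_put(vmm, &vma);
	return ret;
}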
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 1bfd93b85575d..9841f076da2e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -97,4 +97,5 @@ int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index f98f800cc0112..ece650a0c5f9f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -34,6 +34,7 @@
 #include "nouveau_gem.h"
 #include "nouveau_chan.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 static struct nouveau_abi16 *
 nouveau_abi16(struct drm_file *file_priv)
@@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 	}
 
 	if (chan->ntfy) {
-		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		nouveau_vma_del(&chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
 		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
@@ -184,29 +185,34 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nvxx_device(device)->func->pci)
-			getparam->value = 3;
-		else
-		if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
-			getparam->value = 0;
-		else
-		if (!pci_is_pcie(dev->pdev))
-			getparam->value = 1;
-		else
-			getparam->value = 2;
-		break;
+		switch (device->info.platform) {
+		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
+		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
+		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
+		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
+		case NV_DEVICE_INFO_V0_IGP :
+			if (!pci_is_pcie(dev->pdev))
+				getparam->value = 1;
+			else
+				getparam->value = 2;
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = drm->gem.vram_available;
 		break;
@@ -329,8 +334,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		goto done;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
-					&chan->ntfy_vma);
+		ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
 		if (ret)
 			goto done;
 	}
@@ -340,7 +344,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
 done:
 	if (ret)
 		nouveau_abi16_chan_fini(abi16, chan);
@@ -548,8 +552,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
-		args.start += chan->ntfy_vma.offset;
-		args.limit += chan->ntfy_vma.offset;
+		args.start += chan->ntfy_vma->addr;
+		args.limit += chan->ntfy_vma->addr;
 	} else
 	if (drm->agp.bridge) {
 		args.target = NV_DMA_V0_TARGET_AGP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 841cc556fad83..3277476803248 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -23,7 +23,7 @@ struct nouveau_abi16_chan {
 	struct nouveau_channel *chan;
 	struct list_head notifiers;
 	struct nouveau_bo *ntfy;
-	struct nvkm_vma ntfy_vma;
+	struct nouveau_vma *ntfy_vma;
 	struct nvkm_mm  heap;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dd6fba55ad5d0..c4ef3a0a737ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1478,9 +1478,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 		case 1:
 			entry->dpconf.link_bw = 270000;
 			break;
-		default:
+		case 2:
 			entry->dpconf.link_bw = 540000;
 			break;
+		case 3:
+		default:
+			entry->dpconf.link_bw = 810000;
+			break;
 		}
 		switch ((conf & 0x0f000000) >> 24) {
 		case 0xf:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4d..2615912430cc9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -37,6 +37,12 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
 
 /*
  * NV10-NV40 tiling helpers
@@ -48,8 +54,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
@@ -97,7 +102,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +125,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					    pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -155,27 +159,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 	struct nvif_device *device = &drm->client.device;
 
 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-		if (nvbo->tile_mode) {
+		if (nvbo->mode) {
 			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup_64(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup_64(*size, (1 << nvbo->page_shift));
-		*align = max((1 <<  nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 <<  nvbo->page), *align);
 	}
 
 	*size = roundup_64(*size, PAGE_SIZE);
@@ -187,11 +191,13 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	       struct sg_table *sg, struct reservation_object *robj,
 	       struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(cli->dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_vmm *vmm = &cli->vmm.vmm;
 	size_t acc_size;
-	int ret;
 	int type = ttm_bo_type_device;
+	int ret, i, pi = -1;
 
 	if (!size) {
 		NV_WARN(drm, "skipped size %016llx\n", size);
@@ -207,19 +213,82 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 	nvbo->cli = cli;
 
-	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
-		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+	/* This is confusing, and doesn't actually mean we want an uncached
+	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+	 * into in nouveau_gem_new().
+	 */
+	if (flags & TTM_PL_FLAG_UNCACHED) {
+		/* Determine if we can get a cache-coherent map, forcing
+		 * uncached mapping if we can't.
+		 */
+		if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+			nvbo->force_coherent = true;
+	}
+
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+	/* Determine the desirable target GPU page size for the buffer. */
+	for (i = 0; i < vmm->page_nr; i++) {
+		/* Because we cannot currently allow VMM maps to fail
+		 * during buffer migration, we need to determine page
+		 * size for the buffer up-front, and pre-allocate its
+		 * page tables.
+		 *
+		 * Skip page sizes that can't support needed domains.
+		 */
+		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+			continue;
+		if ((flags & TTM_PL_FLAG_TT  ) && !vmm->page[i].host)
+			continue;
+
+		/* Select this page size if it's the first that supports
+		 * the potential memory domains, or when it's compatible
+		 * with the requested compression settings.
+		 */
+		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+			pi = i;
+
+		/* Stop once the buffer is larger than the current page size. */
+		if (size >= 1ULL << vmm->page[i].shift)
+			break;
+	}
+
+	if (WARN_ON(pi < 0)) {
+		kfree(nvbo);
+		return -EINVAL;
+	}
 
-	nvbo->page_shift = 12;
-	if (drm->client.vm) {
-		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+	/* Disable compression if suitable settings couldn't be found. */
+	if (nvbo->comp && !vmm->page[pi].comp) {
+		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+			nvbo->kind = mmu->kind[nvbo->kind];
+		nvbo->comp = 0;
 	}
+	nvbo->page = vmm->page[pi].shift;
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
@@ -262,7 +329,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -270,7 +337,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+		if (nvbo->zeta) {
 			fpfn = vram_pages / 2;
 			lpfn = ~0;
 		} else {
@@ -321,14 +388,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nvkm_mem *mem = bo->mem.mm_node;
-				if (!nvkm_mm_contiguous(mem->mem))
-					evict = true;
-			}
-			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+		if (!nvbo->contig) {
+			nvbo->contig = true;
 			force = true;
+			evict = true;
 		}
 	}
 
@@ -376,7 +439,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 out:
 	if (force && ret)
-		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+		nvbo->contig = false;
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -446,7 +509,6 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -458,7 +520,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_device(drm->dev->dev,
+					   ttm_dma->dma_address[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
@@ -466,7 +529,6 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -478,7 +540,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
@@ -568,6 +630,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvif_mmu *mmu = &drm->client.mmu;
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -584,7 +647,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			/* Some BARs do not support being ioremapped WC */
-			if (nvxx_bar(&drm->client.device)->iomap_uncached) {
+			const u8 type = mmu->type[drm->ttm.type_vram].type;
+			if (type & NVIF_MEM_UNCACHED) {
 				man->available_caching = TTM_PL_FLAG_UNCACHED;
 				man->default_caching = TTM_PL_FLAG_UNCACHED;
 			}
@@ -659,14 +723,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
@@ -691,9 +755,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -729,9 +793,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -768,9 +832,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -806,14 +870,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* COPY */);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 	}
@@ -824,15 +888,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 	}
 	return ret;
@@ -858,12 +922,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
-	int src_tiled = !!mem->memtype;
-	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
+	int src_tiled = !!mem->kind;
+	int dst_tiled = !!nouveau_mem(new_reg)->kind;
 	int ret;
 
 	while (length) {
@@ -1000,25 +1064,31 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *old_mem = bo->mem.mm_node;
-	struct nvkm_mem *new_mem = reg->mm_node;
-	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
+	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *new_mem = nouveau_mem(reg);
+	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+			   old_mem->mem.size, &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+			   new_mem->mem.size, &old_mem->vma[1]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
 	if (ret) {
-		nvkm_vm_put(&old_mem->vma[0]);
-		return ret;
+		nvif_vmm_put(vmm, &old_mem->vma[1]);
+		nvif_vmm_put(vmm, &old_mem->vma[0]);
 	}
-
-	nvkm_vm_map(&old_mem->vma[0], old_mem);
-	nvkm_vm_map(&old_mem->vma[1], new_mem);
-	return 0;
+	return ret;
 }
 
@@ -1200,21 +1270,23 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		     struct ttm_mem_reg *new_reg)
 {
+	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
-			      (new_reg->mem_type == TTM_PL_VRAM ||
-			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
-			nvkm_vm_map(vma, new_reg->mm_node);
-		} else {
+	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
+			nouveau_vma_map(vma, mem);
+		}
+	} else {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			WARN_ON(ttm_bo_wait(bo, false, false));
-			nvkm_vm_unmap(vma);
+			nouveau_vma_unmap(vma);
 		}
 	}
 }
@@ -1234,8 +1306,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
-						nvbo->tile_mode,
-						nvbo->tile_flags);
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1331,8 +1402,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_mem *mem = reg->mm_node;
-	int ret;
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
 	reg->bus.addr = NULL;
 	reg->bus.offset = 0;
@@ -1353,7 +1423,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
@@ -1361,19 +1431,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 		reg->bus.offset = reg->start << PAGE_SHIFT;
 		reg->bus.base = device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
-			int page_shift = 12;
-			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-				page_shift = mem->page_shift;
+		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+			union {
+				struct nv50_mem_map_v0 nv50;
+				struct gf100_mem_map_v0 gf100;
+			} args;
+			u64 handle, length;
+			u32 argc = 0;
+			int ret;
+
+			switch (mem->mem.object.oclass) {
+			case NVIF_CLASS_MEM_NV50:
+				args.nv50.version = 0;
+				args.nv50.ro = 0;
+				args.nv50.kind = mem->kind;
+				args.nv50.comp = mem->comp;
+				break;
+			case NVIF_CLASS_MEM_GF100:
+				args.gf100.version = 0;
+				args.gf100.ro = 0;
+				args.gf100.kind = mem->kind;
+				break;
+			default:
+				WARN_ON(1);
+				break;
+			}
 
-			ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
-					    &mem->bar_vma);
-			if (ret)
-				return ret;
+			ret = nvif_object_map_handle(&mem->mem.object,
+						     &args, argc,
+						     &handle, &length);
+			if (ret != 1)
+				return ret ? ret : -EINVAL;
 
-			nvkm_vm_map(&mem->bar_vma, mem);
-			reg->bus.offset = mem->bar_vma.offset;
+			reg->bus.base = 0;
+			reg->bus.offset = handle;
 		}
 		break;
 	default:
@@ -1385,13 +1476,22 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *mem = reg->mm_node;
-
-	if (!mem->bar_vma.node)
-		return;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	nvkm_vm_unmap(&mem->bar_vma);
-	nvkm_vm_put(&mem->bar_vma);
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
 static int
@@ -1408,7 +1508,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
-		    !nouveau_bo_tile_layout(nvbo))
+		    !nvbo->kind)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1445,9 +1545,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1464,9 +1562,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1476,7 +1572,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev->dev);
+		return ttm_dma_populate((void *)ttm, dev);
 	}
 #endif
 
@@ -1488,12 +1584,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; i++) {
 		dma_addr_t addr;
 
-		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
 				    DMA_BIDIRECTIONAL);
 
-		if (dma_mapping_error(pdev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			while (i--) {
-				dma_unmap_page(pdev, ttm_dma->dma_address[i],
+				dma_unmap_page(dev, ttm_dma->dma_address[i],
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				ttm_dma->dma_address[i] = 0;
 			}
@@ -1511,9 +1607,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1521,9 +1615,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 		return;
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1534,14 +1626,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate((void *)ttm, dev->dev);
+		ttm_dma_unpopulate((void *)ttm, dev);
 		return;
 	}
 #endif
 
 	for (i = 0; i < ttm->num_pages; i++) {
 		if (ttm_dma->dma_address[i]) {
-			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
 				       DMA_BIDIRECTIONAL);
 		}
 	}
@@ -1576,48 +1668,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 };
-
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
-{
-	struct nvkm_vma *vma;
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (vma->vm == vm)
-			return vma;
-	}
-
-	return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
-		   struct nvkm_vma *vma)
-{
-	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	int ret;
-
-	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
-			     NV_MEM_ACCESS_RW, vma);
-	if (ret)
-		return ret;
-
-	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
-	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
-		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
-
-	list_add_tail(&vma->head, &nvbo->vma_list);
-	vma->refcount = 1;
-	return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
-			nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-		list_del(&vma->head);
-	}
-}
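
The most subtle addition to nouveau_bo.c is the up-front page-size selection in nouveau_bo_new(), which replaces the old fixed page_shift heuristic. A standalone model of the loop, assuming vmm->page[] is ordered from largest to smallest shift (the property the early break depends on):

/* Standalone model of the GPU page-size selection added to
 * nouveau_bo_new() above.  page[] is assumed ordered largest shift
 * first, so the break selects the largest page size the buffer spans
 * at least once, preferring compression-capable sizes when asked. */
#include <stdbool.h>
#include <stdio.h>

struct page_desc { unsigned shift; bool vram, host, comp; };

static int
pick_page(const struct page_desc *page, int nr, unsigned long long size,
	  bool want_vram, bool want_host, bool want_comp)
{
	int i, pi = -1;

	for (i = 0; i < nr; i++) {
		/* Skip sizes that can't back the requested domains. */
		if (want_vram && !page[i].vram)
			continue;
		if (want_host && !page[i].host)
			continue;
		/* First acceptable size, or one compatible with the
		 * requested compression settings. */
		if (pi < 0 || !want_comp || page[i].comp)
			pi = i;
		/* Stop once the buffer spans at least one such page. */
		if (size >= 1ULL << page[i].shift)
			break;
	}
	return pi;
}

int main(void)
{
	const struct page_desc page[] = {
		{ 17, true, false, true  },	/* 128KiB, VRAM only */
		{ 12, true, true,  false },	/* 4KiB,  VRAM + host */
	};
	int pi = pick_page(page, 2, 1ULL << 20, true, false, true);

	printf("shift %u\n", pi < 0 ? 0 : page[pi].shift);	/* shift 17 */
	return 0;
}
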
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index b06a5385d6dd1..23002bdd94a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -24,12 +24,16 @@ struct nouveau_bo {
 	bool validate_mapped;
 
 	struct list_head vma_list;
-	unsigned page_shift;
 
 	struct nouveau_cli *cli;
 
-	u32 tile_mode;
-	u32 tile_flags;
+	unsigned contig:1;
+	unsigned page:5;
+	unsigned kind:8;
+	unsigned comp:3;
+	unsigned zeta:3;
+	unsigned mode;
+
 	struct nouveau_drm_tile *tile;
 
 	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
@@ -89,13 +93,6 @@ int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
 void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
 
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-
-int  nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
-			struct nvkm_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
-
 /* TODO: submit equivalent to TTM generic API upstream? */
 static inline void __iomem *
 nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
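
The packed bitfields replacing tile_mode/tile_flags are derived from, and re-encoded into, the UAPI words by the nouveau_bo_new() and nouveau_gem_info() hunks. A round-trip sketch for the Tesla-era layout, assuming the bit positions used there (kind in bits 8..14, comp in bits 16..17, NONCONTIG at bit 3):

/* Round-trip sketch for the Tesla (NV50) tile_flags layout. */
#include <assert.h>
#include <stdint.h>

#define TILE_NONCONTIG 0x00000008	/* NOUVEAU_GEM_TILE_NONCONTIG */

struct bo_bits { unsigned contig:1, kind:8, comp:3; };

static struct bo_bits decode(uint32_t tile_flags)
{
	struct bo_bits b;

	b.kind   = (tile_flags & 0x00007f00) >> 8;
	b.comp   = (tile_flags & 0x00030000) >> 16;
	b.contig = !(tile_flags & TILE_NONCONTIG);
	return b;
}

static uint32_t encode(struct bo_bits b)
{
	return (b.contig ? 0 : TILE_NONCONTIG) | b.kind << 8 | b.comp << 16;
}

int main(void)
{
	uint32_t flags = 0x00017000 | TILE_NONCONTIG;	/* kind 0x70, comp 1 */
	assert(encode(decode(flags)) == flags);
	return 0;
}
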
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index dbc41fa86ee8b..af1116655910c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -40,6 +40,7 @@
 #include "nouveau_chan.h"
 #include "nouveau_fence.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
 int nouveau_vram_pushbuf;
@@ -83,6 +84,14 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		bool super;
+
+		if (cli) {
+			super = cli->base.super;
+			cli->base.super = true;
+		}
+
 		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
 		nvif_object_fini(&chan->nvsw);
@@ -91,12 +100,15 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 		nvif_notify_fini(&chan->kill);
 		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
-		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+		nouveau_vma_del(&chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
 		kfree(chan);
+
+		if (cli)
+			cli->base.super = super;
 	}
 	*pchan = NULL;
 }
@@ -106,7 +118,6 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		     u32 size, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
-	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
 	u32 target;
@@ -142,11 +153,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	 * pushbuf lives in, this is because the GEM code requires that
 	 * we be able to call out to other (indirect) push buffers
 	 */
-	chan->push.vma.offset = chan->push.buffer->bo.offset;
+	chan->push.addr = chan->push.buffer->bo.offset;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
-					&chan->push.vma);
+		ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+				      &chan->push.vma);
 		if (ret) {
 			nouveau_channel_del(pchan);
 			return ret;
@@ -155,7 +166,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
 		args.start = 0;
-		args.limit = cli->vm->mmu->limit - 1;
+		args.limit = cli->vmm.vmm.limit - 1;
+
+		chan->push.addr = chan->push.vma->addr;
 	} else
 	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -185,7 +198,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
-			args.limit = mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		}
 	}
 
@@ -203,6 +216,7 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		    u32 engine, struct nouveau_channel **pchan)
 {
+	struct nouveau_cli *cli = (void *)device->object.client;
 	static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
 					MAXWELL_CHANNEL_GPFIFO_A,
 					KEPLER_CHANNEL_GPFIFO_B,
@@ -233,22 +247,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 			args.kepler.version = 0;
 			args.kepler.engines = engine;
 			args.kepler.ilength = 0x02000;
-			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
-			args.kepler.vm = 0;
+			args.kepler.ioffset = 0x10000 + chan->push.addr;
+			args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.kepler);
 		} else
 		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
 			args.fermi.version = 0;
 			args.fermi.ilength = 0x02000;
-			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
-			args.fermi.vm = 0;
+			args.fermi.ioffset = 0x10000 + chan->push.addr;
+			args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
 			args.nv50.ilength = 0x02000;
-			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.ioffset = 0x10000 + chan->push.addr;
 			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
-			args.nv50.vm = 0;
+			args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.nv50);
 		}
 
@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
 	/* create channel object */
 	args.version = 0;
 	args.pushbuf = nvif_handle(&chan->push.ctxdma);
-	args.offset = chan->push.vma.offset;
+	args.offset = chan->push.addr;
 
 	do {
 		ret = nvif_object_init(&device->object, 0, *oclass++,
@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nvif_device *device = chan->device;
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_drm *drm = chan->drm;
-	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(&chan->user);
+	nvif_object_map(&chan->user, NULL, 0);
 
 	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
 		ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
-			args.limit = cli->vm->mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
-			args.limit = cli->vm->mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		} else
 		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
-			args.limit = mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		}
 
 		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
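
nouveau_channel_del() above now brackets teardown with a save/elevate/restore of cli->base.super, since object destructors may issue privileged nvif calls; nouveau_mem.c below repeats the same shape. A self-contained illustration with toy types standing in for the kernel structures:

/* Self-contained illustration of the save/elevate/restore pattern
 * applied to cli->base.super in nouveau_channel_del() above. */
#include <assert.h>
#include <stdbool.h>

struct toy_client { bool super; };

static void privileged_fini(struct toy_client *cli)
{
	assert(cli->super);	/* stands in for privileged nvif teardown */
}

static void toy_channel_del(struct toy_client *cli)
{
	bool super = cli->super;	/* preserve the caller's setting */

	cli->super = true;	/* dtors may touch privileged paths */
	privileged_fini(cli);
	cli->super = super;	/* restore whatever it was before */
}

int main(void)
{
	struct toy_client cli = { .super = false };

	toy_channel_del(&cli);
	assert(!cli.super);
	return 0;
}
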
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 46b947ba1cf4a..f29d3a72c48c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -16,8 +16,9 @@ struct nouveau_channel {
 
 	struct {
 		struct nouveau_bo *buffer;
-		struct nvkm_vma vma;
+		struct nouveau_vma *vma;
 		struct nvif_object ctxdma;
+		u64 addr;
 	} push;
 
 	/* TODO: this will be reworked in the near future */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 201aec2ea5b81..1411bf05b89d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,14 +1,11 @@
 #ifndef __NOUVEAU_DISPLAY_H__
 #define __NOUVEAU_DISPLAY_H__
-
-#include <subdev/mmu.h>
-
 #include "nouveau_drv.h"
 
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
 	struct nouveau_bo *nvbo;
-	struct nvkm_vma vma;
+	struct nouveau_vma *vma;
 	u32 r_handle;
 	u32 r_format;
 	u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2634a1a798885..10e84f6ca2b73 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_vmm.h"
 
 void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 			return -EBUSY;
 	}
 
-	if (val < chan->push.vma.offset ||
-	    val > chan->push.vma.offset + (chan->dma.max << 2))
+	if (val < chan->push.addr ||
+	    val > chan->push.addr + (chan->dma.max << 2))
 		return -EINVAL;
 
-	return (val - chan->push.vma.offset) >> 2;
+	return (val - chan->push.addr) >> 2;
 }
 
 void
@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 {
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 	u64 offset;
 
-	vma = nouveau_bo_vma_find(bo, cli->vm);
+	vma = nouveau_vma_find(bo, &cli->vmm);
 	BUG_ON(!vma);
-	offset = vma->offset + delta;
+	offset = vma->addr + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 
@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 			 * instruct the GPU to jump back to the start right
 			 * after processing the currently pending commands.
 			 */
-			OUT_RING(chan, chan->push.vma.offset | 0x20000000);
+			OUT_RING(chan, chan->push.addr | 0x20000000);
 
 			/* wait for GET to depart from the skips area.
 			 * prevents writing GET==PUT and causing a race
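
READ_GET() above now validates the hardware GET pointer against the cached chan->push.addr rather than an embedded nvkm_vma offset. The bounds check and byte-to-dword conversion, modeled standalone:

/* Standalone model of the READ_GET() check above: a hardware GET
 * pointer is only meaningful inside the push buffer, and is then
 * converted to a 32-bit-word index relative to push.addr. */
#include <assert.h>
#include <stdint.h>

static int64_t read_get(uint64_t val, uint64_t push_addr, int dma_max)
{
	if (val < push_addr || val > push_addr + ((uint64_t)dma_max << 2))
		return -22;	/* -EINVAL: GET outside the push buffer */
	return (val - push_addr) >> 2;	/* byte offset -> dword index */
}

int main(void)
{
	uint64_t addr = 0x20100000;

	assert(read_get(addr + 0x40, addr, 0x1000) == 0x10);
	assert(read_get(addr - 4, addr, 0x1000) == -22);
	return 0;
}
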
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index aff3a9d0a1fcd..74e10b14a7da2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
 #define WRITE_PUT(val) do {                                                    \
 	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
-	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
 } while (0)
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 595630d1fb9e2..8d4a5be3b9130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev)
 		return nouveau_platform_name(to_platform_device(dev->dev));
 }
 
+static inline bool
+nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
+{
+	if (!dma_fence_is_signaled(fence)) {
+		if (!wait)
+			return false;
+		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+	}
+	dma_fence_put(fence);
+	return true;
+}
+
+static void
+nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
+{
+	struct nouveau_cli_work *work, *wtmp;
+	mutex_lock(&cli->lock);
+	list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
+		if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
+			list_del(&work->head);
+			work->func(work);
+		}
+	}
+	mutex_unlock(&cli->lock);
+}
+
+static void
+nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
+	schedule_work(&work->cli->work);
+}
+
+void
+nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
+		       struct nouveau_cli_work *work)
+{
+	work->fence = dma_fence_get(fence);
+	work->cli = cli;
+	mutex_lock(&cli->lock);
+	list_add_tail(&work->head, &cli->worker);
+	mutex_unlock(&cli->lock);
+	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
+		nouveau_cli_work_fence(fence, &work->cb);
+}
+
+static void
+nouveau_cli_work(struct work_struct *w)
+{
+	struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
+	nouveau_cli_work_flush(cli, false);
+}
+
 static void
 nouveau_cli_fini(struct nouveau_cli *cli)
 {
-	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
+	nouveau_cli_work_flush(cli, true);
 	usif_client_fini(cli);
+	nouveau_vmm_fini(&cli->vmm);
+	nvif_mmu_fini(&cli->mmu);
 	nvif_device_fini(&cli->device);
+	mutex_lock(&cli->drm->master.lock);
 	nvif_client_fini(&cli->base);
+	mutex_unlock(&cli->drm->master.lock);
 }
 
 static int
 nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 		 struct nouveau_cli *cli)
 {
+	static const struct nvif_mclass
+	mems[] = {
+		{ NVIF_CLASS_MEM_GF100, -1 },
+		{ NVIF_CLASS_MEM_NV50 , -1 },
+		{ NVIF_CLASS_MEM_NV04 , -1 },
+		{}
+	};
+	static const struct nvif_mclass
+	mmus[] = {
+		{ NVIF_CLASS_MMU_GF100, -1 },
+		{ NVIF_CLASS_MMU_NV50 , -1 },
+		{ NVIF_CLASS_MMU_NV04 , -1 },
+		{}
+	};
+	static const struct nvif_mclass
+	vmms[] = {
+		{ NVIF_CLASS_VMM_GP100, -1 },
+		{ NVIF_CLASS_VMM_GM200, -1 },
+		{ NVIF_CLASS_VMM_GF100, -1 },
+		{ NVIF_CLASS_VMM_NV50 , -1 },
+		{ NVIF_CLASS_VMM_NV04 , -1 },
+		{}
+	};
 	u64 device = nouveau_name(drm->dev);
 	int ret;
 
 	snprintf(cli->name, sizeof(cli->name), "%s", sname);
-	cli->dev = drm->dev;
+	cli->drm = drm;
 	mutex_init(&cli->mutex);
 	usif_client_init(cli);
 
-	if (cli == &drm->client) {
+	INIT_WORK(&cli->work, nouveau_cli_work);
+	INIT_LIST_HEAD(&cli->worker);
+	mutex_init(&cli->lock);
+
+	if (cli == &drm->master) {
 		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
 				       cli->name, device, &cli->base);
 	} else {
-		ret = nvif_client_init(&drm->client.base, cli->name, device,
+		mutex_lock(&drm->master.lock);
+		ret = nvif_client_init(&drm->master.base, cli->name, device,
 				       &cli->base);
+		mutex_unlock(&drm->master.lock);
 	}
 	if (ret) {
 		NV_ERROR(drm, "Client allocation failed: %d\n", ret);
@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 		goto done;
 	}
 
+	ret = nvif_mclass(&cli->device.object, mmus);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported MMU class\n");
+		goto done;
+	}
+
+	ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+	if (ret) {
+		NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+		goto done;
+	}
+
+	ret = nvif_mclass(&cli->mmu.object, vmms);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported VMM class\n");
+		goto done;
+	}
+
+	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
+	if (ret) {
+		NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+		goto done;
+	}
+
+	ret = nvif_mclass(&cli->mmu.object, mems);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported MEM class\n");
+		goto done;
+	}
+
+	cli->mem = &mems[ret];
+	return 0;
 done:
 	if (ret)
 		nouveau_cli_fini(cli);
@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	dev->dev_private = drm;
 	drm->dev = dev;
 
+	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+	if (ret)
+		return ret;
+
 	ret = nouveau_cli_init(drm, "DRM", &drm->client);
 	if (ret)
 		return ret;
@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	nouveau_vga_init(drm);
 
-	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		if (!nvxx_device(&drm->client.device)->mmu) {
-			ret = -ENOSYS;
-			goto fail_device;
-		}
-
-		ret = nvkm_vm_new(nvxx_device(&drm->client.device),
-				  0, (1ULL << 40), 0x1000, NULL,
-				  &drm->client.vm);
-		if (ret)
-			goto fail_device;
-
-		nvxx_client(&drm->client.base)->vm = drm->client.vm;
-	}
-
 	ret = nouveau_ttm_init(drm);
 	if (ret)
 		goto fail_ttm;
@@ -516,8 +623,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	nouveau_ttm_fini(drm);
 fail_ttm:
 	nouveau_vga_fini(drm);
-fail_device:
 	nouveau_cli_fini(&drm->client);
+	nouveau_cli_fini(&drm->master);
 	kfree(drm);
 	return ret;
 }
@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev)
 	if (drm->hdmi_device)
 		pci_dev_put(drm->hdmi_device);
 	nouveau_cli_fini(&drm->client);
+	nouveau_cli_fini(&drm->master);
 	kfree(drm);
 }
 
@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	}
 
 	NV_DEBUG(drm, "suspending object tree...\n");
-	ret = nvif_client_suspend(&drm->client.base);
+	ret = nvif_client_suspend(&drm->master.base);
 	if (ret)
 		goto fail_client;
 
@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	NV_DEBUG(drm, "resuming object tree...\n");
-	nvif_client_resume(&drm->client.base);
+	nvif_client_resume(&drm->master.base);
 
 	NV_DEBUG(drm, "resuming fence...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	cli->base.super = false;
 
-	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
-				  (1ULL << 40), 0x1000, NULL, &cli->vm);
-		if (ret)
-			goto done;
-
-		nvxx_client(&cli->base)->vm = cli->vm;
-	}
-
 	fpriv->driver_priv = cli;
 
 	mutex_lock(&drm->client.mutex);
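
nouveau_cli_work_queue() above leans on the dma_fence_add_callback() contract: a non-zero return means the fence already signaled and the caller must invoke the callback itself. A standalone model of that defer-or-run-now branch, with a toy fence standing in for struct dma_fence:

/* Standalone model of the defer-or-run-now branch in
 * nouveau_cli_work_queue() above. */
#include <assert.h>
#include <stdbool.h>

struct toy_fence { bool signaled; };

static int toy_add_callback(struct toy_fence *f, void (*cb)(void *), void *p)
{
	if (f->signaled)
		return -2;	/* -ENOENT, as dma_fence_add_callback() */
	(void)cb; (void)p;	/* real code latches cb for the signal path */
	return 0;
}

static int ran;
static void work_fence_cb(void *p) { (void)p; ran++; }

int main(void)
{
	struct toy_fence fence = { .signaled = true };

	if (toy_add_callback(&fence, work_fence_cb, 0))
		work_fence_cb(0);	/* run immediately, as the queue does */
	assert(ran == 1);
	return 0;
}
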
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822fe1d4d35e1..e86b8220a4bb6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -5,7 +5,7 @@
 #define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
 
 #define DRIVER_NAME		"nouveau"
-#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
 #define DRIVER_DATE		"20120801"
 
 #define DRIVER_MAJOR		1
@@ -42,6 +42,8 @@
 #include <nvif/client.h>
 #include <nvif/device.h>
 #include <nvif/ioctl.h>
+#include <nvif/mmu.h>
+#include <nvif/vmm.h>
 
 #include <drm/drmP.h>
 
@@ -61,6 +63,7 @@ struct platform_device;
 
 #include "nouveau_fence.h"
 #include "nouveau_bios.h"
+#include "nouveau_vmm.h"
 
 struct nouveau_drm_tile {
 	struct nouveau_fence *fence;
@@ -86,19 +89,37 @@ enum nouveau_drm_handle {
 
 struct nouveau_cli {
 	struct nvif_client base;
-	struct drm_device *dev;
+	struct nouveau_drm *drm;
 	struct mutex mutex;
 
 	struct nvif_device device;
+	struct nvif_mmu mmu;
+	struct nouveau_vmm vmm;
+	const struct nvif_mclass *mem;
 
-	struct nvkm_vm *vm; /*XXX*/
 	struct list_head head;
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
 	char name[32];
+
+	struct work_struct work;
+	struct list_head worker;
+	struct mutex lock;
 };
 
+struct nouveau_cli_work {
+	void (*func)(struct nouveau_cli_work *);
+	struct nouveau_cli *cli;
+	struct list_head head;
+
+	struct dma_fence *fence;
+	struct dma_fence_cb cb;
+};
+
+void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
+			    struct nouveau_cli_work *);
+
 static inline struct nouveau_cli *
 nouveau_cli(struct drm_file *fpriv)
 {
@@ -109,6 +130,7 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/device.h>
 
 struct nouveau_drm {
+	struct nouveau_cli master;
 	struct nouveau_cli client;
 	struct drm_device *dev;
 
@@ -133,6 +155,9 @@ struct nouveau_drm {
 		struct nouveau_channel *chan;
 		struct nvif_object copy;
 		int mtrr;
+		int type_vram;
+		int type_host;
+		int type_ncoh;
 	} ttm;
 
 	/* GEM interface support */
@@ -204,7 +229,7 @@ void nouveau_drm_device_remove(struct drm_device *dev);
 
 #define NV_PRINTK(l,c,f,a...) do {                                             \
 	struct nouveau_cli *_cli = (c);                                        \
-	dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a);                     \
+	dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a);                \
 } while(0)
 #define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
 #define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
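
The newest-first class tables in nouveau_cli_init() above work because nvif_mclass() returns the index of the first class the device supports, letting cli->mem point straight into the table. A standalone model of that selection; the supported() callback stands in for the device's class query:

/* Standalone model of newest-first class probing, as done over the
 * mems/mmus/vmms tables in nouveau_cli_init() above. */
#include <assert.h>
#include <stdbool.h>

struct toy_mclass { int oclass; };	/* zero entry terminates, like {} */

static int toy_mclass(const struct toy_mclass *m, bool (*supported)(int))
{
	int i;

	for (i = 0; m[i].oclass; i++)
		if (supported(m[i].oclass))
			return i;	/* index, as nvif_mclass() returns */
	return -19;	/* -ENODEV: no supported class */
}

static bool gf100_only(int oclass) { return oclass == 0x9000; }

int main(void)
{
	const struct toy_mclass mems[] = {
		{ 0xa000 },	/* hypothetical newest class */
		{ 0x9000 },	/* hypothetical GF100-level class */
		{ 0 }		/* terminator */
	};

	assert(toy_mclass(mems, gf100_only) == 1);
	return 0;
}
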
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2b12d82aac150..c533d8e04afc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -48,6 +48,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_chan.h"
+#include "nouveau_vmm.h"
 
 #include "nouveau_crtc.h"
 
@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 	chan = nouveau_nofbaccel ? NULL : drm->channel;
 	if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
+		ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
 		if (ret) {
 			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
 			chan = NULL;
@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 out_unlock:
 	if (chan)
-		nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+		nouveau_vma_del(&fb->vma);
 	nouveau_bo_unmap(fb->nvbo);
 out_unpin:
 	nouveau_bo_unpin(fb->nvbo);
@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 	drm_fb_helper_fini(&fbcon->helper);
 
 	if (nouveau_fb->nvbo) {
-		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+		nouveau_vma_del(&nouveau_fb->vma);
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
 		drm_framebuffer_unreference(&nouveau_fb->base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99e14e3e0fe4d..503fa94dc06db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 	WARN_ON(ret);
 }
 
-struct nouveau_fence_work {
-	struct work_struct work;
-	struct dma_fence_cb cb;
-	void (*func)(void *);
-	void *data;
-};
-
-static void
-nouveau_fence_work_handler(struct work_struct *kwork)
-{
-	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
-	work->func(work->data);
-	kfree(work);
-}
-
-static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
-	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
-
-	schedule_work(&work->work);
-}
-
-void
-nouveau_fence_work(struct dma_fence *fence,
-		   void (*func)(void *), void *data)
-{
-	struct nouveau_fence_work *work;
-
-	if (dma_fence_is_signaled(fence))
-		goto err;
-
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work) {
-		/*
-		 * this might not be a nouveau fence any more,
-		 * so force a lazy wait here
-		 */
-		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
-					   true, false));
-		goto err;
-	}
-
-	INIT_WORK(&work->work, nouveau_fence_work_handler);
-	work->func = func;
-	work->data = data;
-
-	if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
-		goto err_free;
-	return;
-
-err_free:
-	kfree(work);
-err:
-	func(data);
-}
-
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
-	fence->sysmem = sysmem;
-
 	ret = nouveau_fence_emit(fence, chan);
 	if (ret)
 		nouveau_fence_unref(&fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d5e58a38f1601..c36031aa013e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -12,8 +12,6 @@ struct nouveau_fence {
 
 	struct list_head head;
 
-	bool sysmem;
-
 	struct nouveau_channel __rcu *channel;
 	unsigned long timeout;
 };
@@ -24,7 +22,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
@@ -90,14 +87,12 @@ int nouveau_flip_complete(struct nvif_notify *);
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
-	struct nvkm_vma vma;
-	struct nvkm_vma vma_gart;
+	struct nouveau_vma *vma;
 };
 
 struct nv84_fence_priv {
 	struct nouveau_fence_priv base;
 	struct nouveau_bo *bo;
-	struct nouveau_bo *bo_gart;
 	u32 *suspend;
 	struct mutex mutex;
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2170534101caf..efc89aaef66a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -31,6 +31,10 @@
 
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
 
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_vma *vma;
 	struct device *dev = drm->dev->dev;
+	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->vm)
+	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->vm);
-	if (!vma) {
-		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-		if (!vma) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		ret = pm_runtime_get_sync(dev);
-		if (ret < 0 && ret != -EACCES) {
-			kfree(vma);
-			goto out;
-		}
-
-		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
-		if (ret)
-			kfree(vma);
-
-		pm_runtime_mark_last_busy(dev);
-		pm_runtime_put_autosuspend(dev);
-	} else {
-		vma->refcount++;
-	}
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0 && ret != -EACCES)
+		goto out;
 
+	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
 out:
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
 }
 
+struct nouveau_gem_object_unmap {
+	struct nouveau_cli_work work;
+	struct nouveau_vma *vma;
+};
+
 static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
-	struct nvkm_vma *vma = data;
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-	kfree(vma);
+	nouveau_vma_del(&vma);
 }
 
 static void
-nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+	struct nouveau_gem_object_unmap *work =
+		container_of(w, typeof(*work), work);
+	nouveau_gem_object_delete(work->vma);
+	kfree(work);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
+	struct nouveau_gem_object_unmap *work;
 	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
 
-	list_del(&vma->head);
+	list_del_init(&vma->head);
 
 	if (fobj && fobj->shared_count > 1)
 		ttm_bo_wait(&nvbo->bo, false, false);
@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	else
 		fence = reservation_object_get_excl(nvbo->bo.resv);
 
-	if (fence && mapped) {
-		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
-	} else {
-		if (mapped)
-			nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-		kfree(vma);
+	if (!fence || !mapped) {
+		nouveau_gem_object_delete(vma);
+		return;
+	}
+
+	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+		nouveau_gem_object_delete(vma);
+		return;
 	}
+
+	work->work.func = nouveau_gem_object_delete_work;
+	work->vma = vma;
+	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 }
 
 void
@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct device *dev = drm->dev->dev;
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->vm)
+	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->vm);
+	vma = nouveau_vma_find(nvbo, &cli->vmm);
 	if (vma) {
-		if (--vma->refcount == 0) {
+		if (--vma->refs == 0) {
 			ret = pm_runtime_get_sync(dev);
 			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 				nouveau_gem_object_unmap(nvbo, vma);
@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(cli->dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 	rep->offset = nvbo->bo.offset;
-	if (cli->vm) {
-		vma = nouveau_bo_vma_find(nvbo, cli->vm);
+	if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+		vma = nouveau_vma_find(nvbo, &cli->vmm);
 		if (!vma)
 			return -EINVAL;
 
-		rep->offset = vma->offset;
+		rep->offset = vma->addr;
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
-	rep->tile_mode = nvbo->tile_mode;
-	rep->tile_flags = nvbo->tile_flags;
+	rep->tile_mode = nvbo->mode;
+	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+		rep->tile_flags |= nvbo->kind << 8;
+	else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+	else
+		rep->tile_flags |= nvbo->zeta;
 	return 0;
 }
 
@@ -255,18 +267,11 @@ int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
-		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
-		return -EINVAL;
-	}
-
 	ret = nouveau_gem_new(cli, req->info.size, req->align,
 			      req->info.domain, req->info.tile_mode,
 			      req->info.tile_flags, &nvbo);
@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				bo[push[i].bo_index].user_priv;
 			uint32_t cmd;
 
-			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 			cmd |= 0x20000000;
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
@@ -843,7 +848,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		req->suffix1 = 0x00000000;
 	} else {
 		req->suffix0 = 0x20000000 |
-			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
+			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 		req->suffix1 = 0x00000000;
 	}
 
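
nouveau_gem_object_unmap() above picks one of three teardown paths: immediate deletion when nothing is in flight or the BO was never mapped, a synchronous fence wait when the work allocation fails, and deferred deletion on the client workqueue otherwise. That decision, distilled into a standalone sketch:

/* Decision logic distilled from nouveau_gem_object_unmap() above.
 * Illustrative only: the flags stand in for the fence/mapped state
 * and the kmalloc() result in the real function. */
#include <assert.h>
#include <stdbool.h>

enum unmap_path {
	DELETE_NOW,		/* no fence, or never mapped: free at once */
	WAIT_THEN_DELETE,	/* no work struct: wait synchronously */
	DEFER_TO_FENCE,		/* queue nouveau_cli_work after the fence */
};

static enum unmap_path
choose_unmap(bool fence_pending, bool mapped, bool work_alloc_ok)
{
	if (!fence_pending || !mapped)
		return DELETE_NOW;
	if (!work_alloc_ok)
		return WAIT_THEN_DELETE;
	return DEFER_TO_FENCE;
}

int main(void)
{
	assert(choose_unmap(false, true, true) == DELETE_NOW);
	assert(choose_unmap(true, true, false) == WAIT_THEN_DELETE);
	assert(choose_unmap(true, true, true) == DEFER_TO_FENCE);
	return 0;
}
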
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 8fa6ed9ddd3af..d39f845dda875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -6,9 +6,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
 
-#define nouveau_bo_tile_layout(nvbo)				\
-	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 0000000000000..589a9621db763
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_mem.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+
+int
+nouveau_mem_map(struct nouveau_mem *mem,
+		struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+	union {
+		struct nv50_vmm_map_v0 nv50;
+		struct gf100_vmm_map_v0 gf100;
+	} args;
+	u32 argc = 0;
+	bool super;
+	int ret;
+
+	switch (vmm->object.oclass) {
+	case NVIF_CLASS_VMM_NV04:
+		break;
+	case NVIF_CLASS_VMM_NV50:
+		args.nv50.version = 0;
+		args.nv50.ro = 0;
+		args.nv50.priv = 0;
+		args.nv50.kind = mem->kind;
+		args.nv50.comp = mem->comp;
+		argc = sizeof(args.nv50);
+		break;
+	case NVIF_CLASS_VMM_GF100:
+	case NVIF_CLASS_VMM_GM200:
+	case NVIF_CLASS_VMM_GP100:
+		args.gf100.version = 0;
+		if (mem->mem.type & NVIF_MEM_VRAM)
+			args.gf100.vol = 0;
+		else
+			args.gf100.vol = 1;
+		args.gf100.ro = 0;
+		args.gf100.priv = 0;
+		args.gf100.kind = mem->kind;
+		argc = sizeof(args.gf100);
+		break;
+	default:
+		WARN_ON(1);
+		return -ENOSYS;
+	}
+
+	super = vmm->object.client->super;
+	vmm->object.client->super = true;
+	ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
+			   &mem->mem, 0);
+	vmm->object.client->super = super;
+	return ret;
+}
+
+void
+nouveau_mem_fini(struct nouveau_mem *mem)
+{
+	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
+	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
+	mutex_lock(&mem->cli->drm->master.lock);
+	nvif_mem_fini(&mem->mem);
+	mutex_unlock(&mem->cli->drm->master.lock);
+}
+
+int
+nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	struct nouveau_cli *cli = mem->cli;
+	struct nouveau_drm *drm = cli->drm;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_mem_ram_v0 args = {};
+	bool super = cli->base.super;
+	u8 type;
+	int ret;
+
+	if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+		type = drm->ttm.type_ncoh;
+	else
+		type = drm->ttm.type_host;
+
+	if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
+		mem->comp = mem->kind = 0;
+	if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
+		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+			mem->kind = mmu->kind[mem->kind];
+		mem->comp = 0;
+	}
+
+	if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
+	else            args.dma = tt->dma_address;
+
+	mutex_lock(&drm->master.lock);
+	cli->base.super = true;
+	ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+				 reg->num_pages << PAGE_SHIFT,
+				 &args, sizeof(args), &mem->mem);
+	cli->base.super = super;
+	mutex_unlock(&drm->master.lock);
+	return ret;
+}
+
+int
+nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	struct nouveau_cli *cli = mem->cli;
+	struct nouveau_drm *drm = cli->drm;
+	struct nvif_mmu *mmu = &cli->mmu;
+	bool super = cli->base.super;
+	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+	int ret;
+
+	mutex_lock(&drm->master.lock);
+	cli->base.super = true;
+	switch (cli->mem->oclass) {
+	case NVIF_CLASS_MEM_GF100:
+		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+					 drm->ttm.type_vram, page, size,
+					 &(struct gf100_mem_v0) {
+						.contig = contig,
+					 }, sizeof(struct gf100_mem_v0),
+					 &mem->mem);
+		break;
+	case NVIF_CLASS_MEM_NV50:
+		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+					 drm->ttm.type_vram, page, size,
+					 &(struct nv50_mem_v0) {
+						.bankswz = mmu->kind[mem->kind] == 2,
+						.contig = contig,
+					 }, sizeof(struct nv50_mem_v0),
+					 &mem->mem);
+		break;
+	default:
+		ret = -ENOSYS;
+		WARN_ON(1);
+		break;
+	}
+	cli->base.super = super;
+	mutex_unlock(&drm->master.lock);
+
+	reg->start = mem->mem.addr >> PAGE_SHIFT;
+	return ret;
+}
+
+void
+nouveau_mem_del(struct ttm_mem_reg *reg)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	nouveau_mem_fini(mem);
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
+}
+
+int
+nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+		struct ttm_mem_reg *reg)
+{
+	struct nouveau_mem *mem;
+
+	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+		return -ENOMEM;
+	mem->cli = cli;
+	mem->kind = kind;
+	mem->comp = comp;
+
+	reg->mm_node = mem;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
new file mode 100644
index 0000000000000..f6d039e738121
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_MEM_H__
+#define __NOUVEAU_MEM_H__
+#include <drm/ttm/ttm_bo_api.h>
+struct ttm_dma_tt;
+
+#include <nvif/mem.h>
+#include <nvif/vmm.h>
+
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_mem_reg *reg)
+{
+	return reg->mm_node;
+}
+
+struct nouveau_mem {
+	struct nouveau_cli *cli;
+	u8 kind;
+	u8 comp;
+	struct nvif_mem mem;
+	struct nvif_vma vma[2];
+};
+
+int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+		    struct ttm_mem_reg *);
+void nouveau_mem_del(struct ttm_mem_reg *);
+int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+void nouveau_mem_fini(struct nouveau_mem *);
+int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+#endif
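/* Minimal usage sketch for the interface above (illustrative only; the
 * example_* name is hypothetical and error paths are abbreviated):
 * allocate the tracking structure, back it with contiguous VRAM, grab
 * address space in the client's VMM, and map it.  Note that
 * nouveau_mem_fini() puts the VMAs against the DRM client's VMM.
 */
static int
example_mem_lifecycle(struct nouveau_cli *cli, struct ttm_mem_reg *reg)
{
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(cli, 0 /* kind */, 0 /* comp */, reg);
	if (ret)
		return ret;
	mem = nouveau_mem(reg);

	ret = nouveau_mem_vram(reg, true, 12);	/* contig, 4KiB pages */
	if (ret == 0)
		ret = nvif_vmm_get(&cli->vmm.vmm, PTES, false, mem->mem.page,
				   0, mem->mem.size, &mem->vma[0]);
	if (ret == 0)
		ret = nouveau_mem_map(mem, &cli->vmm.vmm, &mem->vma[0]);
	if (ret)
		nouveau_mem_del(reg);	/* nouveau_mem_fini() + kfree() */
	return ret;
}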
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b7ab268f7d6f5..941bf33bd2497 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 
 #include "nouveau_drv.h"
+#include "nouveau_mem.h"
 #include "nouveau_ttm.h"
 
 struct nouveau_sgdma_be {
@@ -9,7 +10,7 @@ struct nouveau_sgdma_be {
 	 * nouveau_bo.c works properly, otherwise have to move them here
 	 */
 	struct ttm_dma_tt ttm;
-	struct nvkm_mem *node;
+	struct nouveau_mem *mem;
 };
 
 static void
@@ -27,19 +28,20 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = reg->mm_node;
-
-	if (ttm->sg) {
-		node->sg    = ttm->sg;
-		node->pages = NULL;
-	} else {
-		node->sg    = NULL;
-		node->pages = nvbe->ttm.dma_address;
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
+
+	ret = nouveau_mem_host(reg, &nvbe->ttm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+	if (ret) {
+		nouveau_mem_fini(mem);
+		return ret;
 	}
-	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
-	nvkm_vm_map(&node->vma[0], node);
-	nvbe->node = node;
+	nvbe->mem = mem;
 	return 0;
 }
 
@@ -47,7 +49,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	nvkm_vm_unmap(&nvbe->node->vma[0]);
+	nouveau_mem_fini(nvbe->mem);
 	return 0;
 }
 
@@ -61,30 +63,20 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = reg->mm_node;
-
-	/* noop: bound in move_notify() */
-	if (ttm->sg) {
-		node->sg    = ttm->sg;
-		node->pages = NULL;
-	} else {
-		node->sg    = NULL;
-		node->pages = nvbe->ttm.dma_address;
-	}
-	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
-	return 0;
-}
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
 
-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
-	/* noop: unbound in move_notify() */
+	ret = nouveau_mem_host(reg, &nvbe->ttm);
+	if (ret)
+		return ret;
+
+	nvbe->mem = mem;
 	return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
 	.bind			= nv50_sgdma_bind,
-	.unbind			= nv50_sgdma_unbind,
+	.unbind			= nv04_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b0ad7fcefcf5b..08b974b304827 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -23,53 +23,37 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include "nouveau_drv.h"
-#include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"
 
 #include <drm/drm_legacy.h>
 
 #include <core/tegra.h>
 
 static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
-	man->priv = fb;
 	return 0;
 }
 
 static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+nouveau_manager_fini(struct ttm_mem_type_manager *man)
 {
-	man->priv = NULL;
 	return 0;
 }
 
-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
+static void
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-	if (node->vma[0].node) {
-		nvkm_vm_unmap(&node->vma[0]);
-		nvkm_vm_put(&node->vma[0]);
-	}
-
-	if (node->vma[1].node) {
-		nvkm_vm_unmap(&node->vma[1]);
-		nvkm_vm_put(&node->vma[1]);
-	}
+	nouveau_mem_del(reg);
 }
 
 static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *reg)
+nouveau_manager_debug(struct ttm_mem_type_manager *man,
+		      struct drm_printer *printer)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-	nvkm_mem_node_cleanup(reg->mm_node);
-	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *reg)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_mem *node;
-	u32 size_nc = 0;
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
 	int ret;
 
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->page_shift;
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
-			     reg->page_alignment << PAGE_SHIFT, size_nc,
-			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
-		reg->mm_node = NULL;
-		return (ret == -ENOSPC) ? 0 : ret;
+		nouveau_mem_del(reg);
+		if (ret == -ENOSPC) {
+			reg->mm_node = NULL;
+			return 0;
+		}
+		return ret;
 	}
 
-	node->page_shift = nvbo->page_shift;
-
-	reg->mm_node = node;
-	reg->start   = node->offset >> PAGE_SHIFT;
 	return 0;
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-	.init = nouveau_vram_manager_init,
-	.takedown = nouveau_vram_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nouveau_vram_manager_new,
-	.put_node = nouveau_vram_manager_del,
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug,
 };
 
-static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-	return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *reg)
-{
-	nvkm_mem_node_cleanup(reg->mm_node);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
-}
-
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *reg)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_mem *node;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
+	int ret;
 
-	node->page_shift = 12;
-
-	switch (drm->client.device.info.family) {
-	case NV_DEVICE_INFO_V0_TNT:
-	case NV_DEVICE_INFO_V0_CELSIUS:
-	case NV_DEVICE_INFO_V0_KELVIN:
-	case NV_DEVICE_INFO_V0_RANKINE:
-	case NV_DEVICE_INFO_V0_CURIE:
-		break;
-	case NV_DEVICE_INFO_V0_TESLA:
-		if (drm->client.device.info.chipset != 0x50)
-			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
-		break;
-	case NV_DEVICE_INFO_V0_FERMI:
-	case NV_DEVICE_INFO_V0_KEPLER:
-	case NV_DEVICE_INFO_V0_MAXWELL:
-	case NV_DEVICE_INFO_V0_PASCAL:
-		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
-		break;
-	default:
-		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-			drm->client.device.info.family);
-		break;
-	}
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	reg->mm_node = node;
-	reg->start   = 0;
+	reg->start = 0;
 	return 0;
 }
 
-static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
-			   struct drm_printer *printer)
-{
-}
-
 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-	.init = nouveau_gart_manager_init,
-	.takedown = nouveau_gart_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nouveau_gart_manager_new,
-	.put_node = nouveau_gart_manager_del,
-	.debug = nouveau_gart_manager_debug
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug
 };
 
-/*XXX*/
-#include <subdev/mmu/nv04.h>
-static int
-nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
-	struct nv04_mmu *priv = (void *)mmu;
-	struct nvkm_vm *vm = NULL;
-	nvkm_vm_ref(priv->vm, &vm, NULL);
-	man->priv = vm;
-	return 0;
-}
-
-static int
-nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-	struct nvkm_vm *vm = man->priv;
-	nvkm_vm_ref(NULL, &vm, NULL);
-	man->priv = NULL;
-	return 0;
-}
-
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
-	struct nvkm_mem *node = reg->mm_node;
-	if (node->vma[0].node)
-		nvkm_vm_put(&node->vma[0]);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
-}
-
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
 		      struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
-	node->page_shift = 12;
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
-			  NV_MEM_ACCESS_RW, &node->vma[0]);
+	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		kfree(node);
+		nouveau_mem_del(reg);
+		if (ret == -ENOSPC) {
+			reg->mm_node = NULL;
+			return 0;
+		}
 		return ret;
 	}
 
-	reg->mm_node = node;
-	reg->start   = node->vma[0].offset >> PAGE_SHIFT;
+	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
-static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
-			struct drm_printer *printer)
-{
-}
-
 const struct ttm_mem_type_manager_func nv04_gart_manager = {
-	.init = nv04_gart_manager_init,
-	.takedown = nv04_gart_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nv04_gart_manager_new,
-	.put_node = nv04_gart_manager_del,
-	.debug = nv04_gart_manager_debug
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug
 };
 
 int
@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 {
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nvkm_pci *pci = device->pci;
+	struct nvif_mmu *mmu = &drm->client.mmu;
 	struct drm_device *dev = drm->dev;
-	u8 bits;
-	int ret;
+	int typei, ret;
 
-	if (pci && pci->agp.bridge) {
-		drm->agp.bridge = pci->agp.bridge;
-		drm->agp.base = pci->agp.base;
-		drm->agp.size = pci->agp.size;
-		drm->agp.cma = pci->agp.cma;
-	}
+	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+						   NVIF_MEM_COHERENT);
+	if (typei < 0)
+		return -ENOSYS;
 
-	bits = nvxx_mmu(&drm->client.device)->dma_bits;
-	if (nvxx_device(&drm->client.device)->func->pci) {
-		if (drm->agp.bridge)
-			bits = 32;
-	} else if (device->func->tegra) {
-		struct nvkm_device_tegra *tegra = device->func->tegra(device);
+	drm->ttm.type_host = typei;
 
-		/*
-		 * If the platform can use a IOMMU, then the addressable DMA
-		 * space is constrained by the IOMMU bit
-		 */
-		if (tegra->func->iommu_bit)
-			bits = min(bits, tegra->func->iommu_bit);
+	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+	if (typei < 0)
+		return -ENOSYS;
 
-	}
+	drm->ttm.type_ncoh = typei;
 
-	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret && bits != 32) {
-		bits = 32;
-		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
+	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
+	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
+					   NVIF_MEM_KIND |
+					   NVIF_MEM_COMP |
+					   NVIF_MEM_DISP);
+		if (typei < 0)
+			return -ENOSYS;
+
+		drm->ttm.type_vram = typei;
+	} else {
+		drm->ttm.type_vram = -1;
 	}
-	if (ret)
-		return ret;
 
-	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret)
-		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
+	if (pci && pci->agp.bridge) {
+		drm->agp.bridge = pci->agp.bridge;
+		drm->agp.base = pci->agp.base;
+		drm->agp.size = pci->agp.size;
+		drm->agp.cma = pci->agp.cma;
+	}
 
 	ret = nouveau_ttm_global_init(drm);
 	if (ret)
@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 				  &nouveau_bo_driver,
 				  dev->anon_inode->i_mapping,
 				  DRM_FILE_PAGE_OFFSET,
-				  bits <= 32 ? true : false);
+				  drm->client.mmu.dmabits <= 32 ? true : false);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;
@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
 	/* GART init */
 	if (!drm->agp.bridge) {
-		drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
+		drm->gem.gart_available = drm->client.vmm.vmm.limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
 	}
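/* Sketch of the type-selection idiom above (illustrative; the behaviour
 * of nvif_mmu_type() is inferred from its use here): it presumably
 * returns the index of the first exposed memory type whose flags include
 * all of the requested ones, or a negative value if none does.
 */
static int
example_pick_host_type(struct nvif_mmu *mmu)
{
	int typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
				       NVIF_MEM_COHERENT);
	if (typei < 0)	/* no coherent host type: settle for uncached */
		typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
	return typei;
}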
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
new file mode 100644
index 0000000000000..9e2628dd8e4d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_vmm.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+#include "nouveau_mem.h"
+
+void
+nouveau_vma_unmap(struct nouveau_vma *vma)
+{
+	if (vma->mem) {
+		nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
+		vma->mem = NULL;
+	}
+}
+
+int
+nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
+{
+	struct nvif_vma tmp = { .addr = vma->addr };
+	int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
+	if (ret)
+		return ret;
+	vma->mem = mem;
+	return 0;
+}
+
+struct nouveau_vma *
+nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
+{
+	struct nouveau_vma *vma;
+
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vmm == vmm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+void
+nouveau_vma_del(struct nouveau_vma **pvma)
+{
+	struct nouveau_vma *vma = *pvma;
+	if (vma && --vma->refs <= 0) {
+		if (likely(vma->addr != ~0ULL)) {
+			struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
+			nvif_vmm_put(&vma->vmm->vmm, &tmp);
+		}
+		list_del(&vma->head);
+		kfree(*pvma);
+		*pvma = NULL;
+	}
+}
+
+int
+nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+		struct nouveau_vma **pvma)
+{
+	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_vma *vma;
+	struct nvif_vma tmp;
+	int ret;
+
+	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
+		vma->refs++;
+		return 0;
+	}
+
+	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
+		return -ENOMEM;
+	vma->vmm = vmm;
+	vma->refs = 1;
+	vma->addr = ~0ULL;
+	vma->mem = NULL;
+	list_add_tail(&vma->head, &nvbo->vma_list);
+
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
+				   mem->mem.size, &tmp);
+		if (ret)
+			goto done;
+
+		vma->addr = tmp.addr;
+		ret = nouveau_vma_map(vma, mem);
+	} else {
+		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+				   mem->mem.size, &tmp);
+		vma->addr = tmp.addr;
+	}
+
+done:
+	if (ret)
+		nouveau_vma_del(pvma);
+	return ret;
+}
+
+void
+nouveau_vmm_fini(struct nouveau_vmm *vmm)
+{
+	nvif_vmm_fini(&vmm->vmm);
+	vmm->cli = NULL;
+}
+
+int
+nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
+{
+	int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
+				&vmm->vmm);
+	if (ret)
+		return ret;
+
+	vmm->cli = cli;
+	return 0;
+}
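/* Usage sketch for the refcounted per-VMM mappings above (hypothetical
 * caller): nouveau_vma_new() either takes a reference on an existing
 * mapping of the BO in this VMM or creates one; nouveau_vma_del() only
 * unmaps and frees on the final reference.
 */
static int
example_vma_use(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
	struct nouveau_vma *vma;
	int ret = nouveau_vma_new(nvbo, vmm, &vma);
	if (ret)
		return ret;

	/* ... emit commands referencing vma->addr ... */

	nouveau_vma_del(&vma);	/* drop the reference taken above */
	return 0;
}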
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
new file mode 100644
index 0000000000000..5c31f43678d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_VMA_H__
+#define __NOUVEAU_VMA_H__
+#include <nvif/vmm.h>
+struct nouveau_bo;
+struct nouveau_mem;
+
+struct nouveau_vma {
+	struct nouveau_vmm *vmm;
+	int refs;
+	struct list_head head;
+	u64 addr;
+
+	struct nouveau_mem *mem;
+};
+
+struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
+int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
+		    struct nouveau_vma **);
+void nouveau_vma_del(struct nouveau_vma **);
+int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vma_unmap(struct nouveau_vma *);
+
+struct nouveau_vmm {
+	struct nouveau_cli *cli;
+	struct nvif_vmm vmm;
+	struct nvkm_vm *vm;
+};
+
+int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
+void nouveau_vmm_fini(struct nouveau_vmm *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e4751f92b342d..92d46222c79df 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -318,7 +318,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
 				ret = nvif_object_init(disp, 0, oclass[0],
 						       data, size, &chan->user);
 				if (ret == 0)
-					nvif_object_map(&chan->user);
+					nvif_object_map(&chan->user, NULL, 0);
 				nvif_object_sclass_put(&sclass);
 				return ret;
 			}
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 {
 	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
 	struct nv50_dmac_ctxdma *ctxdma;
-	const u8    kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	const u8    kind = fb->nvbo->kind;
 	const u32 handle = 0xfb000000 | kind;
 	struct {
 		struct nv_dma_v0 base;
@@ -510,6 +510,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	int ret;
 
 	mutex_init(&dmac->lock);
+	INIT_LIST_HEAD(&dmac->ctxdma);
 
 	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
 				       &dmac->handle, GFP_KERNEL);
@@ -556,7 +557,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&dmac->ctxdma);
 	return ret;
 }
 
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 
 	asyw->image.w = fb->base.width;
 	asyw->image.h = fb->base.height;
-	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	asyw->image.kind = fb->nvbo->kind;
 
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->tile_mode >> 4;
+			asyw->image.block = fb->nvbo->mode >> 4;
 		else
-			asyw->image.block = fb->nvbo->tile_mode;
+			asyw->image.block = fb->nvbo->mode;
 		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 327dcd7901edf..facd18564e0d8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING(chan);
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index bd7a8a1e4ad97..5f0c0c27d5dcf 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
+#include "nouveau_vmm.h"
 
 #include "nv50_display.h"
 
@@ -68,12 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = chan->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + chan->chid * 16;
 
 	return fctx->base.emit32(chan, addr, fence->base.seqno);
 }
@@ -83,12 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = prev->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + prev->chid * 16;
 
 	return fctx->base.sync32(chan, addr, fence->base.seqno);
 }
@@ -108,8 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
 	mutex_lock(&priv->mutex);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	nouveau_vma_del(&fctx->vma);
 	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
@@ -137,11 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sequence = nv84_fence_read(chan);
 
 	mutex_lock(&priv->mutex);
-	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
-	if (ret == 0) {
-		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
-					&fctx->vma_gart);
-	}
+	ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
 	mutex_unlock(&priv->mutex);
 
 	if (ret)
@@ -182,10 +168,6 @@ static void
 nv84_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv84_fence_priv *priv = drm->fence;
-	nouveau_bo_unmap(priv->bo_gart);
-	if (priv->bo_gart)
-		nouveau_bo_unpin(priv->bo_gart);
-	nouveau_bo_ref(NULL, &priv->bo_gart);
 	nouveau_bo_unmap(priv->bo);
 	if (priv->bo)
 		nouveau_bo_unpin(priv->bo);
@@ -238,21 +220,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0)
-		ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
-				     0, NULL, NULL, &priv->bo_gart);
-	if (ret == 0) {
-		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
-		if (ret == 0) {
-			ret = nouveau_bo_map(priv->bo_gart);
-			if (ret)
-				nouveau_bo_unpin(priv->bo_gart);
-		}
-		if (ret)
-			nouveau_bo_ref(NULL, &priv->bo_gart);
-	}
-
 	if (ret)
 		nv84_fence_destroy(drm);
 	return ret;
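/* Illustrative detail: each channel owns one 16-byte slot in the single
 * fence BO, so emit and sync now derive the target address from the same
 * VMA rather than choosing between VRAM and GART copies.  Hypothetical
 * helper equivalent to the arithmetic above:
 */
static inline u64
example_fence_slot_addr(struct nv84_fence_chan *fctx, int chid)
{
	return fctx->vma->addr + chid * 16;
}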
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 90f27bfa381fd..c0deef4fe7274 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING  (chan, info->fix.line_length);
 	OUT_RING  (chan, info->var.xres_virtual);
 	OUT_RING  (chan, info->var.yres_virtual);
-	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
 	OUT_RING  (chan, format);
 	OUT_RING  (chan, 1);
@@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING  (chan, info->fix.line_length);
 	OUT_RING  (chan, info->var.xres_virtual);
 	OUT_RING  (chan, info->var.yres_virtual);
-	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING (chan);
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 067b5e9f5ec1c..f1675a4ab6fa5 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -2,4 +2,7 @@ nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
 nvif-y += nvif/driver.o
+nvif-y += nvif/mem.o
+nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
new file mode 100644
index 0000000000000..0f9382c601458
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mem.h>
+#include <nvif/client.h>
+
+#include <nvif/if000a.h>
+
+void
+nvif_mem_fini(struct nvif_mem *mem)
+{
+	nvif_object_fini(&mem->object);
+}
+
+int
+nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+		   u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+	struct nvif_mem_v0 *args;
+	u8 stack[128];
+	int ret;
+
+	mem->object.client = NULL;
+	if (type < 0)
+		return -EINVAL;
+
+	if (sizeof(*args) + argc > sizeof(stack)) {
+		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+			return -ENOMEM;
+	} else {
+		args = (void *)stack;
+	}
+	args->version = 0;
+	args->type = type;
+	args->page = page;
+	args->size = size;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_init(&mmu->object, 0, oclass, args,
+			       sizeof(*args) + argc, &mem->object);
+	if (ret == 0) {
+		mem->type = mmu->type[type].type;
+		mem->page = args->page;
+		mem->addr = args->addr;
+		mem->size = args->size;
+	}
+
+	if (args != (void *)stack)
+		kfree(args);
+	return ret;
+}
+
+int
+nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+	      u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+	int ret = -EINVAL, i;
+
+	mem->object.client = NULL;
+
+	for (i = 0; ret && i < mmu->type_nr; i++) {
+		if ((mmu->type[i].type & type) == type) {
+			ret = nvif_mem_init_type(mmu, oclass, i, page, size,
+						 argv, argc, mem);
+		}
+	}
+
+	return ret;
+}
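/* Usage sketch (illustrative): nvif_mem_init() tries each exposed type
 * whose flags cover the request until one initialises, so callers can
 * ask by property rather than by type index.  Per-class arguments are
 * omitted here (argv = NULL, argc = 0); whether a given class accepts
 * empty arguments is an assumption of this sketch.
 */
static int
example_mem_by_flags(struct nvif_mmu *mmu, s32 oclass, u64 size,
		     struct nvif_mem *mem)
{
	return nvif_mem_init(mmu, oclass, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE,
			     PAGE_SHIFT, size, NULL, 0, mem);
}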
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
new file mode 100644
index 0000000000000..15d0dcbf7ab41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mmu.h>
+
+#include <nvif/class.h>
+#include <nvif/if0008.h>
+
+void
+nvif_mmu_fini(struct nvif_mmu *mmu)
+{
+	kfree(mmu->kind);
+	kfree(mmu->type);
+	kfree(mmu->heap);
+	nvif_object_fini(&mmu->object);
+}
+
+int
+nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+{
+	struct nvif_mmu_v0 args;
+	int ret, i;
+
+	args.version = 0;
+	mmu->heap = NULL;
+	mmu->type = NULL;
+	mmu->kind = NULL;
+
+	ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
+			       &mmu->object);
+	if (ret)
+		goto done;
+
+	mmu->dmabits = args.dmabits;
+	mmu->heap_nr = args.heap_nr;
+	mmu->type_nr = args.type_nr;
+	mmu->kind_nr = args.kind_nr;
+
+	mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
+	mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+	if (ret = -ENOMEM, !mmu->heap || !mmu->type)
+		goto done;
+
+	mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+	if (!mmu->kind && mmu->kind_nr)
+		goto done;
+
+	for (i = 0; i < mmu->heap_nr; i++) {
+		struct nvif_mmu_heap_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
+				       &args, sizeof(args));
+		if (ret)
+			goto done;
+
+		mmu->heap[i].size = args.size;
+	}
+
+	for (i = 0; i < mmu->type_nr; i++) {
+		struct nvif_mmu_type_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
+				       &args, sizeof(args));
+		if (ret)
+			goto done;
+
+		mmu->type[i].type = 0;
+		if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
+		if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
+		if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
+		if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
+		if (args.kind    ) mmu->type[i].type |= NVIF_MEM_KIND;
+		if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
+		if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
+		if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
+		mmu->type[i].heap = args.heap;
+	}
+
+	if (mmu->kind_nr) {
+		struct nvif_mmu_kind_v0 *kind;
+		u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+
+		if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
+			goto done;
+		kind->version = 0;
+		kind->count = mmu->kind_nr;
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
+				       kind, argc);
+		if (ret == 0)
+			memcpy(mmu->kind, kind->data, kind->count);
+		kfree(kind);
+	}
+
+done:
+	if (ret)
+		nvif_mmu_fini(mmu);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index c3fb6a20f567d..40adfe9b334b3 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -166,46 +166,77 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
 }
 
 void
-nvif_object_unmap(struct nvif_object *object)
+nvif_object_unmap_handle(struct nvif_object *object)
+{
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_unmap unmap;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_UNMAP,
+	};
+
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
+		       u64 *handle, u64 *length)
 {
-	if (object->map.size) {
-		struct nvif_client *client = object->client;
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_unmap unmap;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_UNMAP,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_map_v0 map;
+	} *args;
+	u32 argn = sizeof(*args) + argc;
+	int ret, maptype;
+
+	if (!(args = kzalloc(argn, GFP_KERNEL)))
+		return -ENOMEM;
+	args->ioctl.type = NVIF_IOCTL_V0_MAP;
+	memcpy(args->map.data, argv, argc);
 
-		if (object->map.ptr) {
+	ret = nvif_object_ioctl(object, args, argn, NULL);
+	*handle = args->map.handle;
+	*length = args->map.length;
+	maptype = args->map.type;
+	kfree(args);
+	return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+	struct nvif_client *client = object->client;
+	if (object->map.ptr) {
+		if (object->map.size) {
 			client->driver->unmap(client, object->map.ptr,
 						      object->map.size);
-			object->map.ptr = NULL;
+			object->map.size = 0;
 		}
-
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		object->map.size = 0;
+		object->map.ptr = NULL;
+		nvif_object_unmap_handle(object);
 	}
 }
 
 int
-nvif_object_map(struct nvif_object *object)
+nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
 {
 	struct nvif_client *client = object->client;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_map_v0 map;
-	} args = {
-		.ioctl.type = NVIF_IOCTL_V0_MAP,
-	};
-	int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
-	if (ret == 0) {
-		object->map.size = args.map.length;
-		object->map.ptr = client->driver->map(client, args.map.handle,
-						      object->map.size);
-		if (ret = -ENOMEM, object->map.ptr)
+	u64 handle, length;
+	int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
+	if (ret >= 0) {
+		if (ret) {
+			object->map.ptr = client->driver->map(client,
+							      handle,
+							      length);
+			if (ret = -ENOMEM, object->map.ptr) {
+				object->map.size = length;
+				return 0;
+			}
+		} else {
+			object->map.ptr = (void *)(unsigned long)handle;
 			return 0;
-		nvif_object_unmap(object);
+		}
+		nvif_object_unmap_handle(object);
 	}
 	return ret;
 }
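/* Sketch of the new mapping contract (illustrative): on success,
 * nvif_object_map_handle() returns 1 when the handle is an IO (bus)
 * address the caller must map itself, and 0 when it is already a CPU
 * pointer; nvif_object_map() wraps exactly this, which is why existing
 * callers only gained the (argv, argc) arguments.  Hypothetical direct
 * user:
 */
static void __iomem *
example_map_object_io(struct nvif_object *object)
{
	u64 handle, length;
	int ret = nvif_object_map_handle(object, NULL, 0, &handle, &length);

	if (ret == 1)		/* NVIF_IOCTL_MAP_V0_IO */
		return ioremap(handle, length);
	if (ret == 0)		/* NVIF_IOCTL_MAP_V0_VA */
		return (void __iomem *)(unsigned long)handle;
	return NULL;		/* error; successful maps must later be
				 * released with nvif_object_unmap_handle() */
}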
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
new file mode 100644
index 0000000000000..31cdb2d2e1ff1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/if000c.h>
+
+int
+nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
+{
+	return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
+				&(struct nvif_vmm_unmap_v0) { .addr = addr },
+				sizeof(struct nvif_vmm_unmap_v0));
+}
+
+int
+nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
+	     struct nvif_mem *mem, u64 offset)
+{
+	struct nvif_vmm_map_v0 *args;
+	u8 stack[16];
+	int ret;
+
+	if (sizeof(*args) + argc > sizeof(stack)) {
+		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+			return -ENOMEM;
+	} else {
+		args = (void *)stack;
+	}
+
+	args->version = 0;
+	args->addr = addr;
+	args->size = size;
+	args->memory = nvif_handle(&mem->object);
+	args->offset = offset;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
+			       args, sizeof(*args) + argc);
+	if (args != (void *)stack)
+		kfree(args);
+	return ret;
+}
+
+void
+nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+	if (vma->size) {
+		WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
+					 &(struct nvif_vmm_put_v0) {
+						.addr = vma->addr,
+					 }, sizeof(struct nvif_vmm_put_v0)));
+		vma->size = 0;
+	}
+}
+
+int
+nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
+	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
+{
+	struct nvif_vmm_get_v0 args;
+	int ret;
+
+	args.version = vma->size = 0;
+	args.sparse = sparse;
+	args.page = page;
+	args.align = align;
+	args.size = size;
+
+	switch (type) {
+	case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
+	case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
+	case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
+			       &args, sizeof(args));
+	if (ret == 0) {
+		vma->addr = args.addr;
+		vma->size = args.size;
+	}
+	return ret;
+}
+
+void
+nvif_vmm_fini(struct nvif_vmm *vmm)
+{
+	kfree(vmm->page);
+	nvif_object_fini(&vmm->object);
+}
+
+int
+nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
+	      void *argv, u32 argc, struct nvif_vmm *vmm)
+{
+	struct nvif_vmm_v0 *args;
+	u32 argn = sizeof(*args) + argc;
+	int ret = -ENOSYS, i;
+
+	vmm->object.client = NULL;
+	vmm->page = NULL;
+
+	if (!(args = kmalloc(argn, GFP_KERNEL)))
+		return -ENOMEM;
+	args->version = 0;
+	args->addr = addr;
+	args->size = size;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
+			       &vmm->object);
+	if (ret)
+		goto done;
+
+	vmm->start = args->addr;
+	vmm->limit = args->size;
+
+	vmm->page_nr = args->page_nr;
+	vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+	if (!vmm->page) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < vmm->page_nr; i++) {
+		struct nvif_vmm_page_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
+				       &args, sizeof(args));
+		if (ret)
+			break;
+
+		vmm->page[i].shift = args.shift;
+		vmm->page[i].sparse = args.sparse;
+		vmm->page[i].vram = args.vram;
+		vmm->page[i].host = args.host;
+		vmm->page[i].comp = args.comp;
+	}
+
+done:
+	if (ret)
+		nvif_vmm_fini(vmm);
+	kfree(args);
+	return ret;
+}
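/* End-to-end sketch of the interface above (illustrative; error paths
 * trimmed): allocate page-table-backed address space, map memory into
 * it, then release both.  nvif_vmm_put() clears vma.size, so repeated
 * puts are harmless.
 */
static int
example_vmm_roundtrip(struct nvif_vmm *vmm, struct nvif_mem *mem)
{
	struct nvif_vma vma;
	int ret;

	ret = nvif_vmm_get(vmm, PTES, false, mem->page, 0, mem->size, &vma);
	if (ret)
		return ret;

	ret = nvif_vmm_map(vmm, vma.addr, mem->size, NULL, 0, mem, 0);
	if (ret == 0)
		nvif_vmm_unmap(vmm, vma.addr);
	nvif_vmm_put(vmm, &vma);
	return ret;
}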
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 0d3a896892b4b..ac671202919e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -301,5 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
 	client->debug = nvkm_dbgopt(dbg, "CLIENT");
 	client->objroot = RB_ROOT;
 	client->ntfy = ntfy;
+	INIT_LIST_HEAD(&client->umem);
+	spin_lock_init(&client->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index b6c916954a107..657231c3c098a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -126,6 +126,15 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
+static int
+nvkm_engine_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->preinit)
+		engine->func->preinit(engine);
+	return 0;
+}
+
 static void *
 nvkm_engine_dtor(struct nvkm_subdev *subdev)
 {
@@ -138,6 +147,7 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_engine_func = {
 	.dtor = nvkm_engine_dtor,
+	.preinit = nvkm_engine_preinit,
 	.init = nvkm_engine_init,
 	.fini = nvkm_engine_fini,
 	.intr = nvkm_engine_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index a7bd22706b2a5..d6de2b3ed2c30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 }
 
 /* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		     void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_heap_rd32,
 	.wr32 = nvkm_gpuobj_heap_wr32,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_heap = {
 	.acquire = nvkm_gpuobj_heap_acquire,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+			       vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32,
 	.wr32 = nvkm_gpuobj_wr32,
+	.map = nvkm_gpuobj_map,
 };
 
 static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_func = {
 	.acquire = nvkm_gpuobj_acquire,
+	.map = nvkm_gpuobj_map,
 };
 
 static int
@@ -185,7 +208,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
 		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
-	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
 }
 
 void
@@ -196,7 +219,7 @@ nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
 		if (gpuobj->parent)
 			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
 		nvkm_mm_fini(&gpuobj->heap);
-		nvkm_memory_del(&gpuobj->memory);
+		nvkm_memory_unref(&gpuobj->memory);
 		kfree(*pgpuobj);
 		*pgpuobj = NULL;
 	}
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
 	return ret;
 }
 
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_memory *memory = gpuobj->memory;
-	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret == 0)
-		nvkm_memory_map(memory, vma, 0);
-	return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
-}
-
 /* the below is basically only here to support sharing the paged dma object
  * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
  * anywhere else.
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index be19bbe56bba8..d777df5a64e6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -53,7 +53,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	struct nvkm_oclass oclass;
+	struct nvkm_oclass oclass = { .client = client };
 	int ret = -ENOSYS, i = 0;
 
 	nvif_ioctl(object, "sclass size %d\n", size);
@@ -257,13 +257,19 @@ nvkm_ioctl_map(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
+	enum nvkm_object_map type;
 	int ret = -ENOSYS;
 
 	nvif_ioctl(object, "map size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
 		nvif_ioctl(object, "map vers %d\n", args->v0.version);
-		ret = nvkm_object_map(object, &args->v0.handle,
-					      &args->v0.length);
+		ret = nvkm_object_map(object, data, size, &type,
+				      &args->v0.handle,
+				      &args->v0.length);
+		if (ret == 0 && type == NVKM_OBJECT_MAP_IO)
+			args->v0.type = NVIF_IOCTL_MAP_V0_IO;
+		else
+			args->v0.type = NVIF_IOCTL_MAP_V0_VA;
 	}
 
 	return ret;
@@ -281,6 +287,7 @@ nvkm_ioctl_unmap(struct nvkm_client *client,
 	nvif_ioctl(object, "unmap size %d\n", size);
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		nvif_ioctl(object, "unmap\n");
+		ret = nvkm_object_unmap(object);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 8903c04c977e8..29f4b4070b557 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -22,27 +22,116 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include <core/memory.h>
+#include <core/mm.h>
+#include <subdev/fb.h>
 #include <subdev/instmem.h>
 
+void
+nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags = *ptags;
+	if (tags) {
+		mutex_lock(&fb->subdev.mutex);
+		if (refcount_dec_and_test(&tags->refcount)) {
+			nvkm_mm_free(&fb->tags, &tags->mn);
+			kfree(memory->tags);
+			memory->tags = NULL;
+		}
+		mutex_unlock(&fb->subdev.mutex);
+		*ptags = NULL;
+	}
+}
+
+int
+nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+		     u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags;
+
+	mutex_lock(&fb->subdev.mutex);
+	if ((tags = memory->tags)) {
+		/* If comptags exist for the memory, but a different amount
+		 * than requested, the buffer is being mapped with settings
+		 * that are incompatible with existing mappings.
+		 */
+		if (tags->mn && tags->mn->length != nr) {
+			mutex_unlock(&fb->subdev.mutex);
+			return -EINVAL;
+		}
+
+		refcount_inc(&tags->refcount);
+		*ptags = tags;
+		return 0;
+	}
+
+	if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
+		mutex_unlock(&fb->subdev.mutex);
+		return -ENOMEM;
+	}
+
+	if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+		if (clr)
+			clr(device, tags->mn->offset, tags->mn->length);
+	} else {
+		/* Failure to allocate HW comptags is not an error, the
+		 * caller should fall back to an uncompressed map.
+		 *
+		 * As memory can be mapped in multiple places, we still
+		 * need to track the allocation failure and ensure that
+		 * any additional mappings remain uncompressed.
+		 *
+		 * This is handled by returning an empty nvkm_tags.
+		 */
+		tags->mn = NULL;
+	}
+
+	refcount_set(&tags->refcount, 1);
+	mutex_unlock(&fb->subdev.mutex);
+	*ptags = tags;
+	return 0;
+}
+
 void
 nvkm_memory_ctor(const struct nvkm_memory_func *func,
 		 struct nvkm_memory *memory)
 {
 	memory->func = func;
+	kref_init(&memory->kref);
+}
+
+static void
+nvkm_memory_del(struct kref *kref)
+{
+	struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+	if (!WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			memory = memory->func->dtor(memory);
+		kfree(memory);
+	}
 }
 
 void
-nvkm_memory_del(struct nvkm_memory **pmemory)
+nvkm_memory_unref(struct nvkm_memory **pmemory)
 {
 	struct nvkm_memory *memory = *pmemory;
-	if (memory && !WARN_ON(!memory->func)) {
-		if (memory->func->dtor)
-			*pmemory = memory->func->dtor(memory);
-		kfree(*pmemory);
+	if (memory) {
+		kref_put(&memory->kref, nvkm_memory_del);
 		*pmemory = NULL;
 	}
 }
 
+struct nvkm_memory *
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+	if (memory)
+		kref_get(&memory->kref);
+	return memory;
+}
+
 int
 nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
 		u64 size, u32 align, bool zero,
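/* Usage sketch for the shared comptag tracking above (illustrative):
 * the first mapping allocates tags (or records the failure to do so),
 * later mappings share them via the refcount, and tags->mn == NULL
 * tells the caller to fall back to an uncompressed mapping.
 */
static int
example_comp_map(struct nvkm_memory *memory, struct nvkm_device *device,
		 u32 nr)
{
	struct nvkm_tags *tags;
	int ret = nvkm_memory_tags_get(memory, device, nr, NULL, &tags);
	if (ret)
		return ret;	/* tag count clashes with a prior mapping */

	if (tags->mn) {
		/* ... program PTEs using tags->mn->offset/length ... */
	} else {
		/* ... allocation failed earlier: map uncompressed ... */
	}

	nvkm_memory_tags_put(memory, device, &tags);
	return 0;
}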
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 5c7891234eea5..f78a06a6b2f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 }
 
 int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
 {
 	struct nvkm_mm_node *node, *prev;
 	u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
 
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	node->heap = ++mm->heap_nodes;
+	node->heap = heap;
+	mm->heap_nodes++;
 	return 0;
 }
 
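/* Illustrative consequence of the nvkm_mm_init() change: the heap tag on
 * each added range is now caller-chosen instead of derived from insertion
 * order, so distinct ranges can be grouped or separated explicitly.
 * Hypothetical two-range setup; the offsets and lengths are made up:
 */
static int
example_two_heaps(struct nvkm_mm *mm)
{
	int ret = nvkm_mm_init(mm, 0, 0x0000, 0x1000, 1);
	if (ret == 0)		/* second range, same allocator, heap 1 */
		ret = nvkm_mm_init(mm, 1, 0x2000, 0x1000, 1);
	return ret;
}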
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index acd76fd4f6d85..301a5e5b5f7f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -102,10 +102,19 @@ nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 int
-nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	if (likely(object->func->map))
-		return object->func->map(object, addr, size);
+		return object->func->map(object, argv, argc, type, addr, size);
+	return -ENODEV;
+}
+
+int
+nvkm_object_unmap(struct nvkm_object *object)
+{
+	if (likely(object->func->unmap))
+		return object->func->unmap(object);
 	return -ENODEV;
 }
 
@@ -259,6 +268,7 @@ nvkm_object_dtor(struct nvkm_object *object)
 	}
 
 	nvif_debug(object, "destroy running...\n");
+	nvkm_object_unmap(object);
 	if (object->func->dtor)
 		data = object->func->dtor(object);
 	nvkm_engine_unref(&object->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index e31a0479add0d..16299837a296c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -37,9 +37,17 @@ nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 static int
-nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
-	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
+}
+
+static int
+nvkm_oproxy_unmap(struct nvkm_object *object)
+{
+	return nvkm_object_unmap(nvkm_oproxy(object)->object);
 }
 
 static int
@@ -171,6 +179,7 @@ nvkm_oproxy_func = {
 	.mthd = nvkm_oproxy_mthd,
 	.ntfy = nvkm_oproxy_ntfy,
 	.map = nvkm_oproxy_map,
+	.unmap = nvkm_oproxy_unmap,
 	.rd08 = nvkm_oproxy_rd08,
 	.rd16 = nvkm_oproxy_rd16,
 	.rd32 = nvkm_oproxy_rd32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 89da47234016b..ccba4ae73cc59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -21,6 +21,7 @@
  */
 #include <core/ramht.h>
 #include <core/engine.h>
+#include <core/object.h>
 
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e096a5d9c292f..e146436156985 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -927,7 +927,7 @@ nv84_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
 	.therm = g84_therm_new,
@@ -959,7 +959,7 @@ nv86_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
 	.therm = g84_therm_new,
@@ -991,7 +991,7 @@ nv92_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g92_pci_new,
 	.therm = g84_therm_new,
@@ -1023,7 +1023,7 @@ nv94_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1055,7 +1055,7 @@ nv96_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1087,7 +1087,7 @@ nv98_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1119,7 +1119,7 @@ nva0_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1151,7 +1151,7 @@ nva3_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1185,7 +1185,7 @@ nva5_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1218,7 +1218,7 @@ nva8_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1315,7 +1315,7 @@ nvaf_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1678,7 +1678,7 @@ nve4_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1717,7 +1717,7 @@ nve6_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1756,7 +1756,7 @@ nve7_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1790,7 +1790,7 @@ nvea_chipset = {
 	.imem = gk20a_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk20a_mmu_new,
 	.pmu = gk20a_pmu_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
@@ -1820,7 +1820,7 @@ nvf0_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
@@ -1858,7 +1858,7 @@ nvf1_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
@@ -1896,7 +1896,7 @@ nv106_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk208_pmu_new,
@@ -1934,7 +1934,7 @@ nv108_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk208_pmu_new,
@@ -1958,7 +1958,7 @@ nv108_chipset = {
 static const struct nvkm_device_chip
 nv117_chipset = {
 	.name = "GM107",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.clk = gk104_clk_new,
@@ -1972,7 +1972,7 @@ nv117_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -1992,7 +1992,7 @@ nv117_chipset = {
 static const struct nvkm_device_chip
 nv118_chipset = {
 	.name = "GM108",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.clk = gk104_clk_new,
@@ -2006,7 +2006,7 @@ nv118_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2026,7 +2026,7 @@ nv118_chipset = {
 static const struct nvkm_device_chip
 nv120_chipset = {
 	.name = "GM200",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2039,7 +2039,7 @@ nv120_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2061,7 +2061,7 @@ nv120_chipset = {
 static const struct nvkm_device_chip
 nv124_chipset = {
 	.name = "GM204",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2074,7 +2074,7 @@ nv124_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2096,7 +2096,7 @@ nv124_chipset = {
 static const struct nvkm_device_chip
 nv126_chipset = {
 	.name = "GM206",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2109,7 +2109,7 @@ nv126_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2131,7 +2131,7 @@ nv126_chipset = {
 static const struct nvkm_device_chip
 nv12b_chipset = {
 	.name = "GM20B",
-	.bar = gk20a_bar_new,
+	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
 	.clk = gm20b_clk_new,
 	.fb = gm20b_fb_new,
@@ -2140,7 +2140,7 @@ nv12b_chipset = {
 	.imem = gk20a_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm20b_mmu_new,
 	.pmu = gm20b_pmu_new,
 	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
@@ -2156,7 +2156,7 @@ nv12b_chipset = {
 static const struct nvkm_device_chip
 nv130_chipset = {
 	.name = "GP100",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2168,7 +2168,8 @@ nv130_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gm200_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp100_pmu_new,
@@ -2190,7 +2191,7 @@ nv130_chipset = {
 static const struct nvkm_device_chip
 nv132_chipset = {
 	.name = "GP102",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2202,7 +2203,8 @@ nv132_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2224,7 +2226,7 @@ nv132_chipset = {
 static const struct nvkm_device_chip
 nv134_chipset = {
 	.name = "GP104",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2236,7 +2238,8 @@ nv134_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2258,7 +2261,7 @@ nv134_chipset = {
 static const struct nvkm_device_chip
 nv136_chipset = {
 	.name = "GP106",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2270,7 +2273,8 @@ nv136_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2292,7 +2296,7 @@ nv136_chipset = {
 static const struct nvkm_device_chip
 nv137_chipset = {
 	.name = "GP107",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2304,7 +2308,8 @@ nv137_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2326,7 +2331,7 @@ nv137_chipset = {
 static const struct nvkm_device_chip
 nv138_chipset = {
 	.name = "GP108",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2338,7 +2343,8 @@ nv138_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2355,7 +2361,7 @@ nv138_chipset = {
 static const struct nvkm_device_chip
 nv13b_chipset = {
 	.name = "GP10B",
-	.bar = gk20a_bar_new,
+	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
 	.fb = gp10b_fb_new,
 	.fuse = gm107_fuse_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index 20249d8e444de..2c3c3ee3c494a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -1,7 +1,7 @@
 #ifndef __NVKM_DEVICE_CTRL_H__
 #define __NVKM_DEVICE_CTRL_H__
 #define nvkm_control(p) container_of((p), struct nvkm_control, object)
-#include <core/device.h>
+#include <core/object.h>
 
 struct nvkm_control {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 74a1ffa425f73..f302d2b5782a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	const struct nvkm_device_pci_vendor *pciv;
 	const char *name = NULL;
 	struct nvkm_device_pci *pdev;
-	int ret;
+	int ret, bits;
 
 	ret = pci_enable_device(pci_dev);
 	if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	if (ret)
 		return ret;
 
-	/*
-	 * Set a preliminary DMA mask based on the .dma_bits member of the
-	 * MMU subdevice. This allows other subdevices to create DMA mappings
-	 * in their init() or oneinit() methods, which may be called before the
-	 * TTM layer sets the DMA mask definitively.
-	 * This is necessary for platforms where the default DMA mask of 32
-	 * does not cover any system memory, i.e., when all RAM is > 4 GB.
-	 */
-	if (pdev->device.mmu)
-		dma_set_mask_and_coherent(&pci_dev->dev,
-				DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+	/* Set DMA mask based on capabilities reported by the MMU subdev. */
+	if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+		bits = pdev->device.mmu->dma_bits;
+	else
+		bits = 32;
+
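+	/* Request the mask; fall back to 32 bits if it isn't supported. */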
+	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+	if (ret && bits != 32) {
+		dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+		pdev->device.mmu->dma_bits = 32;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 189ed80e21ffb..78597da6313ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 		if (ret)
 			goto free_domain;
 
-		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
 				   (1ULL << tdev->func->iommu_bit) >>
 				   tdev->iommu.pgshift, 1);
 		if (ret)
@@ -216,7 +216,7 @@ nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
 	if (tdev->irq) {
 		free_irq(tdev->irq, tdev);
 		tdev->irq = 0;
-	};
+	}
 }
 
 static int
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 
 	/**
 	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
-	 * This will be refined in nouveau_ttm_init but we need to do it early
-	 * for instmem to behave properly
 	 */
 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 513ee6b79553d..17adcb4e8854a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -206,10 +206,12 @@ nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
 }
 
 static int
-nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
+		 enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nvkm_udevice *udev = nvkm_udevice(object);
 	struct nvkm_device *device = udev->device;
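+	/* Expose BAR0, the PRI register aperture, as an IO mapping. */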
+	*type = NVKM_OBJECT_MAP_IO;
 	*addr = device->func->resource_addr(device, 0);
 	*size = device->func->resource_size(device, 0);
 	return 0;
@@ -292,6 +294,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
 	if (!sclass) {
 		switch (index) {
 		case 0: sclass = &nvkm_control_oclass; break;
+		case 1:
+			if (!device->mmu)
+				return -EINVAL;
+			sclass = &device->mmu->user;
+			break;
 		default:
 			return -EINVAL;
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 0c0310498afdb..723dcbde2ac2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -191,11 +191,13 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
 }
 
 static int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+		   enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
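+	/* Each channel has a 4KiB page of user registers in BAR0. */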
+	*type = NVKM_OBJECT_MAP_IO;
 	*addr = device->func->resource_addr(device, 0) +
 		0x640000 + (chan->chid.user * 0x1000);
 	*size = 0x001000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 737b38f6fbd2b..9bb4ad5b0e57f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -1,6 +1,7 @@
 #ifndef __NV50_DISP_CHAN_H__
 #define __NV50_DISP_CHAN_H__
 #define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include <core/object.h>
 #include "nv50.h"
 
 struct nv50_disp_chan {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a1e8bf48b7784..c9e0a8f7b5d5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -147,7 +147,7 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
 
 #define IOR_MSG(i,l,f,a...) do {                                               \
 	struct nvkm_ior *_ior = (i);                                           \
-	nvkm_##l(&_ior->disp->engine.subdev, "%s: "f, _ior->name, ##a);        \
+	nvkm_##l(&_ior->disp->engine.subdev, "%s: "f"\n", _ior->name, ##a);    \
 } while(0)
 #define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
 #define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index c95942ef82167..49ef7e57aad41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -26,7 +26,7 @@
 
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
+#include <subdev/mmu/vmm.h>
 
 #include <nvif/class.h>
 
@@ -49,8 +49,8 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
 	int ret;
 
 	if (dmaobj->clone) {
-		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
-		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
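+		/* NV04 uses a single page table, owned by the MMU's own VMM. */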
+		struct nvkm_memory *pgt =
+			device->mmu->vmm->pd->pt[0]->memory;
 		if (!dmaobj->base.start)
 			return nvkm_gpuobj_wrap(pgt, pgpuobj);
 		nvkm_kmap(pgt);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 2e7b4e2105efc..816ccaedfc732 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -99,7 +99,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 	const u32 base = falcon->addr;
 
 	if (!suspend) {
-		nvkm_memory_del(&falcon->core);
+		nvkm_memory_unref(&falcon->core);
 		if (falcon->external) {
 			vfree(falcon->data.data);
 			vfree(falcon->code.data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 660ca7aa95ea3..64f6b7654a08d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -27,6 +27,7 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <core/notify.h>
+#include <subdev/mc.h>
 
 #include <nvif/event.h>
 #include <nvif/unpack.h>
@@ -278,6 +279,12 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
 	return 0;
 }
 
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
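+	/* Reset PFIFO before any of its state is initialised. */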
+	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+}
+
 static int
 nvkm_fifo_init(struct nvkm_engine *engine)
 {
@@ -302,6 +309,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_fifo = {
 	.dtor = nvkm_fifo_dtor,
+	.preinit = nvkm_fifo_preinit,
 	.oneinit = nvkm_fifo_oneinit,
 	.init = nvkm_fifo_init,
 	.fini = nvkm_fifo_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index fab760ae922fb..d83485385934d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -117,8 +117,8 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
 		if (chan->func->engine_dtor)
 			chan->func->engine_dtor(chan, engine);
 		nvkm_object_del(&engn->object);
-		if (chan->vm)
-			atomic_dec(&chan->vm->engref[engine->subdev.index]);
+		if (chan->vmm)
+			atomic_dec(&chan->vmm->engref[engine->subdev.index]);
 	}
 }
 
@@ -151,8 +151,8 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
 			.engine = oclass->engine,
 		};
 
-		if (chan->vm)
-			atomic_inc(&chan->vm->engref[engine->subdev.index]);
+		if (chan->vmm)
+			atomic_inc(&chan->vmm->engref[engine->subdev.index]);
 
 		if (engine->func->fifo.cclass) {
 			ret = engine->func->fifo.cclass(chan, &cclass,
@@ -253,9 +253,11 @@ nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
 }
 
 static int
-nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+		   enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	*type = NVKM_OBJECT_MAP_IO;
 	*addr = chan->addr;
 	*size = chan->size;
 	return 0;
@@ -325,7 +327,10 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
 	if (chan->user)
 		iounmap(chan->user);
 
-	nvkm_vm_ref(NULL, &chan->vm, NULL);
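+	/* Detach the instance block from the VMM and drop our reference. */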
+	if (chan->vmm) {
+		nvkm_vmm_part(chan->vmm, chan->inst->memory);
+		nvkm_vmm_unref(&chan->vmm);
+	}
 
 	nvkm_gpuobj_del(&chan->push);
 	nvkm_gpuobj_del(&chan->inst);
@@ -347,13 +352,12 @@ nvkm_fifo_chan_func = {
 int
 nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
-		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
-		    const struct nvkm_oclass *oclass,
+		    u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+		    u32 user, const struct nvkm_oclass *oclass,
 		    struct nvkm_fifo_chan *chan)
 {
 	struct nvkm_client *client = oclass->client;
 	struct nvkm_device *device = fifo->engine.subdev.device;
-	struct nvkm_mmu *mmu = device->mmu;
 	struct nvkm_dmaobj *dmaobj;
 	unsigned long flags;
 	int ret;
@@ -382,16 +386,19 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
 	}
 
 	/* channel address space */
-	if (!vm && mmu) {
-		if (!client->vm || client->vm->mmu == mmu) {
-			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
-			if (ret)
-				return ret;
-		} else {
+	if (hvmm) {
+		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
+		if (IS_ERR(vmm))
+			return PTR_ERR(vmm);
+
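+		/* The VMM must belong to the same MMU as this device. */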
+		if (vmm->mmu != device->mmu)
 			return -EINVAL;
-		}
-	} else {
-		return -ENOENT;
+
+		ret = nvkm_vmm_join(vmm, chan->inst->memory);
+		if (ret)
+			return ret;
+
+		chan->vmm = nvkm_vmm_ref(vmm);
 	}
 
 	/* allocate channel id */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 61797c4dd07a7..a5c998fe4485a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -229,15 +229,18 @@ g84_fifo_chan_func = {
 };
 
 int
-g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
 		   const struct nvkm_oclass *oclass,
 		   struct nv50_fifo_chan *chan)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
+	if (!vmm)
+		return -EINVAL;
+
 	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
-				  0x10000, 0x1000, false, vm, push,
+				  0x10000, 0x1000, false, vmm, push,
 				  (1ULL << NVKM_ENGINE_BSP) |
 				  (1ULL << NVKM_ENGINE_CE0) |
 				  (1ULL << NVKM_ENGINE_CIPHER) |
@@ -277,9 +280,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
 	if (ret)
 		return ret;
 
-	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
-	if (ret)
-		return ret;
-
-	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 7d697e2dce1ab..fc1142af02cfb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -11,12 +11,9 @@ struct gf100_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-
 	struct {
 		struct nvkm_gpuobj *inst;
-		struct nvkm_vma vma;
+		struct nvkm_vma *vma;
 	} engn[NVKM_SUBDEV_NR];
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 230f64e5f7318..5beb5c628473e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -12,12 +12,9 @@ struct gk104_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-
 	struct {
 		struct nvkm_gpuobj *inst;
-		struct nvkm_vma vma;
+		struct nvkm_vma *vma;
 	} engn[NVKM_SUBDEV_NR];
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 25b60aff40e47..85f7dbf53c997 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -206,7 +206,6 @@ void *
 nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
 	nvkm_ramht_del(&chan->ramht);
 	nvkm_gpuobj_del(&chan->pgd);
 	nvkm_gpuobj_del(&chan->eng);
@@ -229,15 +228,18 @@ nv50_fifo_chan_func = {
 };
 
 int
-nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
 		    const struct nvkm_oclass *oclass,
 		    struct nv50_fifo_chan *chan)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
+	if (!vmm)
+		return -EINVAL;
+
 	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
-				  0x10000, 0x1000, false, vm, push,
+				  0x10000, 0x1000, false, vmm, push,
 				  (1ULL << NVKM_ENGINE_DMAOBJ) |
 				  (1ULL << NVKM_ENGINE_SW) |
 				  (1ULL << NVKM_ENGINE_GR) |
@@ -262,9 +264,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
 	if (ret)
 		return ret;
 
-	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
-	if (ret)
-		return ret;
-
-	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index 4b9da469b704e..d853056e040bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -13,19 +13,18 @@ struct nv50_fifo_chan {
 	struct nvkm_gpuobj *eng;
 	struct nvkm_gpuobj *pgd;
 	struct nvkm_ramht *ramht;
-	struct nvkm_vm *vm;
 
 	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
 };
 
-int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
 			const struct nvkm_oclass *, struct nv50_fifo_chan *);
 void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
 void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
 void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
 void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 
-int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
 		       const struct nvkm_oclass *, struct nv50_fifo_chan *);
 
 extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
index caa9140747525..fc34cddcd2f51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -44,9 +44,9 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+		nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
 				   "pushbuf %llx offset %016llx\n",
-			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
 			   args->v0.offset);
 		if (!args->v0.pushbuf)
 			return -EINVAL;
@@ -57,7 +57,7 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 
-	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
 				 oclass, chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index 0a7b6ed5ed284..c213122cf0885 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -95,6 +95,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
 		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
 
 		c = fifo->ramfc;
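+		/* Keep the RAMFC image mapped across the save loop below. */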
+		nvkm_kmap(fctx);
 		do {
 			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
 			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
@@ -102,6 +103,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
 			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
 			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
 		} while ((++c)->bits);
+		nvkm_done(fctx);
 
 		c = fifo->ramfc;
 		do {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
index 480bc3777be59..8043718ad1504 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -44,9 +44,9 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel dma size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+		nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
 				   "pushbuf %llx offset %016llx\n",
-			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
 			   args->v0.offset);
 		if (!args->v0.pushbuf)
 			return -EINVAL;
@@ -57,7 +57,7 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 
-	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
 				  oclass, chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index cd468ab1db125..f695768681643 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -559,6 +559,7 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
 	struct gf100_fifo *fifo = gf100_fifo(base);
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
 	int ret;
 
 	/* Determine number of PBDMAs by checking valid enable bits. */
@@ -584,12 +585,12 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
 
-	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
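+	/* Allocate BAR1 address space for USERD, and map it there. */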
+	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+			   &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
-	return 0;
+	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
 }
 
 static void
@@ -628,7 +629,7 @@ gf100_fifo_init(struct nvkm_fifo *base)
 	}
 
 	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
-	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -639,10 +640,11 @@ static void *
 gf100_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	nvkm_vm_put(&fifo->user.bar);
-	nvkm_memory_del(&fifo->user.mem);
-	nvkm_memory_del(&fifo->runlist.mem[0]);
-	nvkm_memory_del(&fifo->runlist.mem[1]);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+	nvkm_memory_unref(&fifo->user.mem);
+	nvkm_memory_unref(&fifo->runlist.mem[0]);
+	nvkm_memory_unref(&fifo->runlist.mem[1]);
 	return fifo;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 70db58eab9c3d..b81a2ad48aa4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -26,7 +26,7 @@ struct gf100_fifo {
 
 	struct {
 		struct nvkm_memory *mem;
-		struct nvkm_vma bar;
+		struct nvkm_vma *bar;
 	} user;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index a7e55c422501c..84bd703dd8971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -771,6 +771,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
 	int engn, runl, pbid, ret, i, j;
 	enum nvkm_devidx engidx;
 	u32 *map;
@@ -834,13 +835,12 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
 
-	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
-			    &fifo->user.bar);
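+	/* Reserve BAR1 virtual address space for USERD and map it below. */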
+	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+			   &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
-	return 0;
+	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
 }
 
 static void
@@ -866,7 +866,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
 		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
 	}
 
-	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -876,14 +876,15 @@ static void *
 gk104_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
 
-	nvkm_vm_put(&fifo->user.bar);
-	nvkm_memory_del(&fifo->user.mem);
+	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+	nvkm_memory_unref(&fifo->user.mem);
 
 	for (i = 0; i < fifo->runlist_nr; i++) {
-		nvkm_memory_del(&fifo->runlist[i].mem[1]);
-		nvkm_memory_del(&fifo->runlist[i].mem[0]);
+		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
+		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
 	}
 
 	return fifo;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 44bff98d67251..466f1051f91af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -37,7 +37,7 @@ struct gk104_fifo {
 
 	struct {
 		struct nvkm_memory *mem;
-		struct nvkm_vma bar;
+		struct nvkm_vma *bar;
 	} user;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
index 77c2f2a28bf3b..2121f517b1dda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -45,10 +45,10 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "pushbuf %llx ioffset %016llx "
 				   "ilength %08x\n",
-			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
 			   args->v0.ioffset, args->v0.ilength);
 		if (!args->v0.pushbuf)
 			return -EINVAL;
@@ -59,7 +59,7 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 
-	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
 				 oclass, chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index f9e0377d3d24b..75f9632789b33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -111,7 +111,7 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		u64 addr = chan->engn[engine->subdev.index].vma->addr;
 		nvkm_kmap(inst);
 		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
 		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
@@ -126,7 +126,7 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
@@ -146,8 +146,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
 
-	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
-			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
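+	/* Allocate VA in the channel's VMM and map the engine context into it. */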
+	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+			   &chan->engn[engn].vma);
+	if (ret)
+		return ret;
+
+	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+			       chan->engn[engn].vma, NULL, 0);
 }
 
 static void
@@ -190,10 +195,7 @@ gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 static void *
 gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
-	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
-	nvkm_gpuobj_del(&chan->pgd);
-	return chan;
+	return gf100_fifo_chan(base);
 }
 
 static const struct nvkm_fifo_chan_func
@@ -216,7 +218,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		struct fermi_channel_gpfifo_v0 v0;
 	} *args = data;
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_object *parent = oclass->parent;
 	struct gf100_fifo_chan *chan;
 	u64 usermem, ioffset, ilength;
@@ -224,10 +225,12 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "ioffset %016llx ilength %08x\n",
-			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.version, args->v0.vmm, args->v0.ioffset,
 			   args->v0.ilength);
+		if (!args->v0.vmm)
+			return -EINVAL;
 	} else
 		return ret;
 
@@ -239,7 +242,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
-				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  0x1000, 0x1000, true, args->v0.vmm, 0,
 				  (1ULL << NVKM_ENGINE_CE0) |
 				  (1ULL << NVKM_ENGINE_CE1) |
 				  (1ULL << NVKM_ENGINE_GR) |
@@ -247,29 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 				  (1ULL << NVKM_ENGINE_MSPPP) |
 				  (1ULL << NVKM_ENGINE_MSVLD) |
 				  (1ULL << NVKM_ENGINE_SW),
-				  1, fifo->user.bar.offset, 0x1000,
+				  1, fifo->user.bar->addr, 0x1000,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;
 
 	args->v0.chid = chan->base.chid;
 
-	/* page directory */
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(chan->base.inst);
-	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
-	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
-	nvkm_done(chan->base.inst);
-
-	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
-	if (ret)
-		return ret;
-
 	/* clear channel control registers */
 
 	usermem = chan->base.chid * 0x1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 8abf6f8ef445d..80c87521bebee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -117,7 +117,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 
 	if (offset) {
-		u64   addr = chan->engn[engine->subdev.index].vma.offset;
+		u64   addr = chan->engn[engine->subdev.index].vma->addr;
 		u32 datalo = lower_32_bits(addr) | 0x00000004;
 		u32 datahi = upper_32_bits(addr);
 		nvkm_kmap(inst);
@@ -138,7 +138,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
@@ -158,8 +158,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
 
-	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
-			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
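+	/* As on GF100: reserve channel VA and map the engine context there. */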
+	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+			   &chan->engn[engn].vma);
+	if (ret)
+		return ret;
+
+	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+			       chan->engn[engn].vma, NULL, 0);
 }
 
 static void
@@ -203,10 +208,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 static void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
-	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
-	nvkm_gpuobj_del(&chan->pgd);
-	return chan;
+	return gk104_fifo_chan(base);
 }
 
 static const struct nvkm_fifo_chan_func
@@ -229,17 +231,19 @@ struct gk104_fifo_chan_func {
 static int
 gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 		       struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
-		       u64 vm, u64 ioffset, u64 ilength,
+		       u64 vmm, u64 ioffset, u64 ilength,
 		       const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct gk104_fifo_chan *chan;
 	int runlist = -1, ret = -ENOSYS, i, j;
 	u32 engines = 0, present = 0;
 	u64 subdevs = 0;
 	u64 usermem;
 
+	if (!vmm)
+		return -EINVAL;
+
 	/* Determine which downstream engines are present */
 	for (i = 0; i < fifo->engine_nr; i++) {
 		struct nvkm_engine *engine = fifo->engine[i].engine;
@@ -285,30 +289,14 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
-				  0x1000, 0x1000, true, vm, 0, subdevs,
-				  1, fifo->user.bar.offset, 0x200,
+				  0x1000, 0x1000, true, vmm, 0, subdevs,
+				  1, fifo->user.bar->addr, 0x200,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;
 
 	*chid = chan->base.chid;
 
-	/* Page directory. */
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(chan->base.inst);
-	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
-	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
-	nvkm_done(chan->base.inst);
-
-	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
-	if (ret)
-		return ret;
-
 	/* Clear channel control registers. */
 	usermem = chan->base.chid * 0x200;
 	ilength = order_base_2(ilength / 8);
@@ -373,18 +361,17 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "ioffset %016llx ilength %08x engine %08x\n",
-			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.version, args->v0.vmm, args->v0.ioffset,
 			   args->v0.ilength, args->v0.engines);
 		return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
 					      &args->v0.engines,
 					      &args->v0.chid,
-					       args->v0.vm,
+					       args->v0.vmm,
 					       args->v0.ioffset,
 					       args->v0.ilength,
 					      oclass, pobject);
-
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
index c5a7de9db2596..d8f28ec1e4a81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -45,10 +45,10 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
 				   "pushbuf %llx ioffset %016llx "
 				   "ilength %08x\n",
-			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.version, args->v0.vmm, args->v0.pushbuf,
 			   args->v0.ioffset, args->v0.ilength);
 		if (!args->v0.pushbuf)
 			return -EINVAL;
@@ -59,7 +59,7 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 
-	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
 				  oclass, chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 66eb12c2b5ba7..fa6e094d80686 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -100,8 +100,8 @@ void *
 nv50_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct nv50_fifo *fifo = nv50_fifo(base);
-	nvkm_memory_del(&fifo->runlist[1]);
-	nvkm_memory_del(&fifo->runlist[0]);
+	nvkm_memory_unref(&fifo->runlist[1]);
+	nvkm_memory_unref(&fifo->runlist[0]);
 	return fifo;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index bc77eea351a5c..881015080d838 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -986,14 +986,14 @@ gf100_grctx_pack_tpc[] = {
  ******************************************************************************/
 
 int
-gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, u32 access)
+gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, bool priv)
 {
 	if (info->data) {
 		info->buffer[info->buffer_nr] = round_up(info->addr, align);
 		info->addr = info->buffer[info->buffer_nr] + size;
 		info->data->size = size;
 		info->data->align = align;
-		info->data->access = access;
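+		/* "priv" marks the buffer's mapping as privileged. */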
+		info->data->priv = priv;
 		info->data++;
 		return info->buffer_nr++;
 	}
@@ -1028,9 +1028,8 @@ void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
 	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -1041,9 +1040,8 @@ void
 gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1057,9 +1055,8 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	const u32 attrib = grctx->attrib_nr;
 	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
 	int gpc, tpc;
 	u32 bo = 0;
 
@@ -1267,85 +1264,87 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_mc_unk260(device, 1);
 }
 
+#define CB_RESERVED 0x80000
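+/* Space reserved at the start of the context buffer, ahead of the image. */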
+
 int
 gf100_grctx_generate(struct gf100_gr *gr)
 {
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_memory *chan;
+	struct nvkm_memory *inst = NULL;
+	struct nvkm_memory *data = NULL;
+	struct nvkm_vmm *vmm = NULL;
+	struct nvkm_vma *ctx = NULL;
 	struct gf100_grctx info;
 	int ret, i;
 	u64 addr;
 
-	/* allocate memory to for a "channel", which we'll use to generate
-	 * the default context values
+	/* Allocate memory for a "channel", which we'll use to generate
+	 * the default context values.
 	 */
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
-			      0x1000, true, &chan);
-	if (ret) {
-		nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
-		return ret;
-	}
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      0x1000, 0x1000, true, &inst);
+	if (ret)
+		goto done;
 
-	addr = nvkm_memory_addr(chan);
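+	/* Create a scratch VMM for the fake channel, and join it to the
+	 * instance block.
+	 */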
+	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "grctx", &vmm);
+	if (ret)
+		goto done;
 
-	/* PGD pointer */
-	nvkm_kmap(chan);
-	nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
-	nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
-	nvkm_wo32(chan, 0x0208, 0xffffffff);
-	nvkm_wo32(chan, 0x020c, 0x000000ff);
+	vmm->debug = subdev->debug;
 
-	/* PGT[0] pointer */
-	nvkm_wo32(chan, 0x1000, 0x00000000);
-	nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
+	ret = nvkm_vmm_join(vmm, inst);
+	if (ret)
+		goto done;
 
-	/* identity-map the whole "channel" into its own vm */
-	for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
-		u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
-		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
-	}
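+	/* Allocate the context buffer and map it into the scratch VMM. */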
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      CB_RESERVED + gr->size, 0, true, &data);
+	if (ret)
+		goto done;
 
-	/* context pointer (virt) */
-	nvkm_wo32(chan, 0x0210, 0x00080004);
-	nvkm_wo32(chan, 0x0214, 0x00000000);
-	nvkm_done(chan);
+	ret = nvkm_vmm_get(vmm, 0, nvkm_memory_size(data), &ctx);
+	if (ret)
+		goto done;
 
-	nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
-	nvkm_wr32(device, 0x100cbc, 0x80000001);
-	nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
-			break;
-	);
+	ret = nvkm_memory_map(data, 0, vmm, ctx, NULL, 0);
+	if (ret)
+		goto done;
+
+	/* Setup context pointer. */
+	nvkm_kmap(inst);
+	nvkm_wo32(inst, 0x0210, lower_32_bits(ctx->addr + CB_RESERVED) | 4);
+	nvkm_wo32(inst, 0x0214, upper_32_bits(ctx->addr + CB_RESERVED));
+	nvkm_done(inst);
 
-	/* setup default state for mmio list construction */
+	/* Setup default state for mmio list construction. */
 	info.gr = gr;
 	info.data = gr->mmio_data;
 	info.mmio = gr->mmio_list;
-	info.addr = 0x2000 + (i * 8);
+	info.addr = ctx->addr;
 	info.buffer_nr = 0;
 
-	/* make channel current */
+	/* Make channel current. */
+	addr = nvkm_memory_addr(inst) >> 12;
 	if (gr->firmware) {
 		nvkm_wr32(device, 0x409840, 0x00000030);
-		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr);
 		nvkm_wr32(device, 0x409504, 0x00000003);
 		nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800) & 0x00000010)
 				break;
 		);
 
-		nvkm_kmap(chan);
-		nvkm_wo32(chan, 0x8001c, 1);
-		nvkm_wo32(chan, 0x80020, 0);
-		nvkm_wo32(chan, 0x80028, 0);
-		nvkm_wo32(chan, 0x8002c, 0);
-		nvkm_done(chan);
+		nvkm_kmap(data);
+		nvkm_wo32(data, 0x1c, 1);
+		nvkm_wo32(data, 0x20, 0);
+		nvkm_wo32(data, 0x28, 0);
+		nvkm_wo32(data, 0x2c, 0);
+		nvkm_done(data);
 	} else {
 		nvkm_wr32(device, 0x409840, 0x80000000);
-		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr);
 		nvkm_wr32(device, 0x409504, 0x00000001);
 		nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800) & 0x80000000)
@@ -1355,8 +1354,8 @@ gf100_grctx_generate(struct gf100_gr *gr)
 
 	grctx->main(gr, &info);
 
-	/* trigger a context unload by unsetting the "next channel valid" bit
-	 * and faking a context switch interrupt
+	/* Trigger a context unload by unsetting the "next channel valid" bit
+	 * and faking a context switch interrupt.
 	 */
 	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
 	nvkm_wr32(device, 0x409000, 0x00000100);
@@ -1370,17 +1369,21 @@ gf100_grctx_generate(struct gf100_gr *gr)
 
 	gr->data = kmalloc(gr->size, GFP_KERNEL);
 	if (gr->data) {
-		nvkm_kmap(chan);
+		nvkm_kmap(data);
 		for (i = 0; i < gr->size; i += 4)
-			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
-		nvkm_done(chan);
+			gr->data[i / 4] = nvkm_ro32(data, CB_RESERVED + i);
+		nvkm_done(data);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
 	}
 
 done:
-	nvkm_memory_del(&chan);
+	nvkm_vmm_put(vmm, &ctx);
+	nvkm_memory_unref(&data);
+	nvkm_vmm_part(vmm, inst);
+	nvkm_vmm_unref(&vmm);
+	nvkm_memory_unref(&inst);
 	return ret;
 }
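
The hunk above replaces the hand-built page directory and page tables with
the common VMM API. A condensed, hedged sketch of the allocate/join/map
pattern and its mirror-image teardown follows; it reuses only calls and
names visible in the hunk, with register programming and intermediate steps
omitted:

	struct nvkm_memory *inst = NULL, *data = NULL;
	struct nvkm_vmm *vmm = NULL;
	struct nvkm_vma *ctx = NULL;
	int ret;

	/* Instance block first, then a VMM for it to join. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      0x1000, 0x1000, true, &inst);
	if (ret == 0)
		ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL,
				   "grctx", &vmm);
	if (ret == 0)
		ret = nvkm_vmm_join(vmm, inst);

	/* Backing store, address space for it, then the mapping itself. */
	if (ret == 0)
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      CB_RESERVED + gr->size, 0, true, &data);
	if (ret == 0)
		ret = nvkm_vmm_get(vmm, 0, nvkm_memory_size(data), &ctx);
	if (ret == 0)
		ret = nvkm_memory_map(data, 0, vmm, ctx, NULL, 0);

	/* Teardown runs unconditionally, in reverse (the done: label). */
	nvkm_vmm_put(vmm, &ctx);
	nvkm_memory_unref(&data);
	nvkm_vmm_part(vmm, inst);
	nvkm_vmm_unref(&vmm);
	nvkm_memory_unref(&inst);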
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 017180d147cf5..4731e56fbb115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -11,7 +11,7 @@ struct gf100_grctx {
 	u64 addr;
 };
 
-int  gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, u32 access);
+int  gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, bool priv);
 void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int);
 
 #define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d))
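
With the signature change above, call sites translate the old
NV_MEM_ACCESS_* masks into the single "privileged mapping" flag; every
per-chipset hunk below follows the same pattern, sketched here with names
taken from those hunks:

	/* NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS  ->  priv = true  */
	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);

	/* NV_MEM_ACCESS_RW                      ->  priv = false */
	const int a = mmio_vram(info, size * gr->tpc_total, (1 << s), false);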
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 505cdcbfc0855..82f71b10c06ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -735,9 +735,8 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32  alpha = grctx->alpha_nr;
 	const u32   beta = grctx->attrib_nr;
 	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 74a64e3fd59ac..19301d88577dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -187,9 +187,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32  alpha = grctx->alpha_nr;
 	const u32   beta = grctx->attrib_nr;
 	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index c46b3fdf72031..825c8fd500bc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -847,9 +847,8 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
 	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
 				    grctx->bundle_size / 0x20);
 	const u32 token_limit = grctx->bundle_token_limit;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
 	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -861,9 +860,8 @@ void
 gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 4c4b5ab6e46d1..9b43d4ce3eaa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -867,9 +867,8 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
 	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
 				    grctx->bundle_size / 0x20);
 	const u32 token_limit = grctx->bundle_token_limit;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
 	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418e24, 0x00000000, s, b);
@@ -881,9 +880,8 @@ void
 gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -900,9 +898,8 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32  alpha = grctx->alpha_nr;
 	const u32 attrib = grctx->attrib_nr;
 	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
 	const int max_batches = 0xffff;
 	u32 bo = 0;
 	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 7833bc777a295..88ea322d956c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -33,9 +33,8 @@ void
 gp100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
 	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -51,9 +50,8 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32 attrib = grctx->attrib_nr;
 	const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size, (1 << s), access);
+	const int b = mmio_vram(info, size, (1 << s), false);
 	const int max_batches = 0xffff;
 	u32 ao = 0;
 	u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 80b7ab0bee3a0..7a66b4c2eb188 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -38,9 +38,8 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
 	const u32 attrib = grctx->attrib_nr;
 	const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
-	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size, (1 << s), access);
+	const int b = mmio_vram(info, size, (1 << s), false);
 	const int max_batches = 0xffff;
 	u32 ao = 0;
 	u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 99689f4de502d..2f8dc107047dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -37,6 +37,7 @@
 
 #include <nvif/class.h>
 #include <nvif/cl9097.h>
+#include <nvif/if900d.h>
 #include <nvif/unpack.h>
 
 /*******************************************************************************
@@ -327,13 +328,13 @@ gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
 
 	if (!gr->firmware) {
 		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
-		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
 	} else {
 		nvkm_wo32(*pgpuobj, 0xf4, 0);
 		nvkm_wo32(*pgpuobj, 0xf8, 0);
 		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
-		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
+		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
 		nvkm_wo32(*pgpuobj, 0x1c, 1);
 		nvkm_wo32(*pgpuobj, 0x20, 0);
 		nvkm_wo32(*pgpuobj, 0x28, 0);
@@ -350,18 +351,13 @@ gf100_gr_chan_dtor(struct nvkm_object *object)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-		if (chan->data[i].vma.node) {
-			nvkm_vm_unmap(&chan->data[i].vma);
-			nvkm_vm_put(&chan->data[i].vma);
-		}
-		nvkm_memory_del(&chan->data[i].mem);
+		nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
+		nvkm_memory_unref(&chan->data[i].mem);
 	}
 
-	if (chan->mmio_vma.node) {
-		nvkm_vm_unmap(&chan->mmio_vma);
-		nvkm_vm_put(&chan->mmio_vma);
-	}
-	nvkm_memory_del(&chan->mmio);
+	nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
+	nvkm_memory_unref(&chan->mmio);
+	nvkm_vmm_unref(&chan->vmm);
 	return chan;
 }
 
@@ -380,6 +376,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 	struct gf100_gr_data *data = gr->mmio_data;
 	struct gf100_gr_mmio *mmio = gr->mmio_list;
 	struct gf100_gr_chan *chan;
+	struct gf100_vmm_map_v0 args = { .priv = 1 };
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, i;
 
@@ -387,6 +384,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 		return -ENOMEM;
 	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
 	chan->gr = gr;
+	chan->vmm = nvkm_vmm_ref(fifoch->vmm);
 	*pobject = &chan->object;
 
 	/* allocate memory for a "mmio list" buffer that's used by the HUB
@@ -398,12 +396,14 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
-			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
+	ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
 	if (ret)
 		return ret;
 
-	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+	ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
+			      chan->mmio_vma, &args, sizeof(args));
+	if (ret)
+		return ret;
 
 	/* allocate buffers referenced by mmio list */
 	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
@@ -413,13 +413,19 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 		if (ret)
 			return ret;
 
-		ret = nvkm_vm_get(fifoch->vm,
-				  nvkm_memory_size(chan->data[i].mem), 12,
-				  data->access, &chan->data[i].vma);
+		ret = nvkm_vmm_get(fifoch->vmm, 12,
+				   nvkm_memory_size(chan->data[i].mem),
+				   &chan->data[i].vma);
+		if (ret)
+			return ret;
+
+		args.priv = data->priv;
+
+		ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
+				      chan->data[i].vma, &args, sizeof(args));
 		if (ret)
 			return ret;
 
-		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
 		data++;
 	}
 
@@ -430,7 +436,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 		u32 data = mmio->data;
 
 		if (mmio->buffer >= 0) {
-			u64 info = chan->data[mmio->buffer].vma.offset;
+			u64 info = chan->data[mmio->buffer].vma->addr;
 			data |= info >> mmio->shift;
 		}
 
@@ -1855,8 +1861,12 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 	int ret;
 
 	ret = nvkm_firmware_get(device, fwname, &fw);
-	if (ret)
-		return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+	if (ret) {
+		ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+		if (ret)
+			return -ENODEV;
+		return 0;
+	}
 
 	fuc->size = fw->size;
 	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
@@ -1903,25 +1913,33 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
+
+	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
+	nvkm_wr32(device, 0x4188a4, 0x00000000);
+	nvkm_wr32(device, 0x418888, 0x00000000);
+	nvkm_wr32(device, 0x41888c, 0x00000000);
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
+}
+
 int
 gf100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	struct nvkm_fb *fb = device->fb;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
 	int i;
 
-	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+	gr->func->init_gpc_mmu(gr);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -2036,6 +2054,7 @@ gf100_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gf100_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
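
The channel-side changes above all follow one pattern: reserve address space
with nvkm_vmm_get(), then map with nvkm_memory_map(), carrying the privilege
attribute in gf100_vmm_map_v0 rather than in VMA access flags. A minimal
sketch for a single buffer ("mem" and "vma" stand in for one chan->data[]
slot; cleanup is left to the dtor, as in the code above):

	struct gf100_vmm_map_v0 args = { .priv = data->priv };
	struct nvkm_vma *vma;
	int ret;

	ret = nvkm_vmm_get(chan->vmm, 12, nvkm_memory_size(mem), &vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(mem, 0, chan->vmm, vma, &args, sizeof(args));
	if (ret)
		return ret;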
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index a36e45a4a635c..d7c2adb9b543c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -45,7 +45,7 @@
 struct gf100_gr_data {
 	u32 size;
 	u32 align;
-	u32 access;
+	bool priv;
 };
 
 struct gf100_gr_mmio {
@@ -156,18 +156,20 @@ int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+#include <core/object.h>
 
 struct gf100_gr_chan {
 	struct nvkm_object object;
 	struct gf100_gr *gr;
+	struct nvkm_vmm *vmm;
 
 	struct nvkm_memory *mmio;
-	struct nvkm_vma mmio_vma;
+	struct nvkm_vma *mmio_vma;
 	int mmio_nr;
 
 	struct {
 		struct nvkm_memory *mem;
-		struct nvkm_vma vma;
+		struct nvkm_vma *vma;
 	} data[4];
 };
 
@@ -253,6 +255,7 @@ extern const struct gf100_gr_init gf100_gr_init_mpc_0[];
 extern const struct gf100_gr_init gf100_gr_init_be_0[];
 extern const struct gf100_gr_init gf100_gr_init_fe_1[];
 extern const struct gf100_gr_init gf100_gr_init_pe_1[];
+void gf100_gr_init_gpc_mmu(struct gf100_gr *);
 
 extern const struct gf100_gr_init gf104_gr_init_ds_0[];
 extern const struct gf100_gr_init gf104_gr_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index d736dcd55ea20..ec0f11983b233 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -115,6 +115,7 @@ gf104_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf104_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 2f0d24498427b..cc152eb741235 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -106,6 +106,7 @@ gf108_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf108_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d1d942eb86af2..10d2d73ca8c3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -87,6 +87,7 @@ gf110_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf110_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 0124e468086eb..ac09a07c4150b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -123,6 +123,7 @@ gf117_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gf117_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 8d8e4cafe28f8..7f449ec6f7603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -178,6 +178,7 @@ gf119_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf119_gr = {
 	.init = gf100_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index ec22da6c99fc4..5e82f94c22451 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,8 +24,6 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-#include <subdev/fb.h>
-
 #include <nvif/class.h>
 
 /*******************************************************************************
@@ -207,21 +205,13 @@ int
 gk104_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	struct nvkm_fb *fb = device->fb;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
 	int i;
 
-	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+	gr->func->init_gpc_mmu(gr);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -339,6 +329,7 @@ gk104_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gk104_gr = {
 	.init = gk104_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f31b171a4102d..a38e19b61c1da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -183,6 +183,7 @@ gk110_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gk110_gr = {
 	.init = gk104_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index d76dd178007fb..1912c0bfd7eed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,6 +103,7 @@ gk110b_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gk110b_gr = {
 	.init = gk104_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 14bbe6ed02a9a..1fc258163f25a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,6 +162,7 @@ gk208_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gk208_gr = {
 	.init = gk104_gr_init,
+	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d1dc92999dc00..d6840dc81a295 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -59,7 +59,7 @@ void *
 nv20_gr_chan_dtor(struct nvkm_object *object)
 {
 	struct nv20_gr_chan *chan = nv20_gr_chan(object);
-	nvkm_memory_del(&chan->inst);
+	nvkm_memory_unref(&chan->inst);
 	return chan;
 }
 
@@ -323,7 +323,7 @@ void *
 nv20_gr_dtor(struct nvkm_gr *base)
 {
 	struct nv20_gr *gr = nv20_gr(base);
-	nvkm_memory_del(&gr->ctxtab);
+	nvkm_memory_unref(&gr->ctxtab);
 	return gr;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index cdf4501e3798d..d0cb2b8846ec6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -19,6 +19,7 @@ void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
 int nv30_gr_init(struct nvkm_gr *);
 
 #define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
+#include <core/object.h>
 
 struct nv20_gr_chan {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index 2812ed11f877b..bee8ef2d56974 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -16,6 +16,7 @@ void nv40_gr_intr(struct nvkm_gr *);
 u64 nv40_gr_units(struct nvkm_gr *);
 
 #define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+#include <core/object.h>
 
 struct nv40_gr_chan {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 45eec83a5969b..1ab6ea436b703 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -19,6 +19,7 @@ u64 nv50_gr_units(struct nvkm_gr *);
 int g84_gr_tlb_flush(struct nvkm_gr *);
 
 #define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+#include <core/object.h>
 
 struct nv50_gr_chan {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index d3bb34fcdebfe..f0d35beb58df2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -18,6 +18,7 @@ struct nv31_mpeg_func {
 };
 
 #define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+#include <core/object.h>
 
 struct nv31_mpeg_chan {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 4e528851e9c00..6df880a390193 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -24,6 +24,7 @@
 #include "priv.h"
 
 #include <core/gpuobj.h>
+#include <core/object.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index d7b81cbf82b50..4ff0475e776c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -67,6 +67,7 @@ struct nvkm_specdom {
 };
 
 #define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
+#include <core/object.h>
 
 struct nvkm_perfdom {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index 6608bf6c68421..b5be49f0ac562 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -1,9 +1,11 @@
 #ifndef __NVKM_SW_CHAN_H__
 #define __NVKM_SW_CHAN_H__
 #define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
-#include "priv.h"
+#include <core/object.h>
 #include <core/event.h>
 
+#include "priv.h"
+
 struct nvkm_sw_chan {
 	const struct nvkm_sw_chan_func *func;
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index 943ef4c10091c..bcfff62131fe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -1,7 +1,7 @@
 #ifndef __NVKM_NVSW_H__
 #define __NVKM_NVSW_H__
 #define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
-#include "priv.h"
+#include <core/object.h>
 
 struct nvkm_nvsw {
 	struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 06bdb67a02058..70549381e0828 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -86,7 +86,7 @@ nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
 	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
 	if (!suspend)
-		nvkm_memory_del(&xtensa->gpu_fw);
+		nvkm_memory_unref(&xtensa->gpu_fw);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 1b7f48efd8b12..14be41f24155a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -60,7 +60,7 @@ nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
 }
 
 void
-nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
 {
 	if (!falcon->func->bind_context) {
 		nvkm_error(falcon->user,
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 669c240284709..9def926f24d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -180,7 +180,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
 }
 
 static void
-nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
 {
 	u32 inst_loc;
 	u32 fbif;
@@ -216,7 +216,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
 
 	/* Set context */
-	switch (nvkm_memory_target(ctx->memory)) {
+	switch (nvkm_memory_target(ctx)) {
 	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
 	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
 	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
@@ -228,7 +228,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 	/* Enable context */
 	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
 	nvkm_falcon_wr32(falcon, 0x054,
-			 ((ctx->addr >> 12) & 0xfffffff) |
+			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
 			 (inst_loc << 28) | (1 << 30));
 
 	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
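
With the nvkm_memory-based interface above, any instance block can be bound
directly, without first wrapping it in an nvkm_gpuobj; target and address
are derived through the memory API. A hypothetical caller ("inst" being
whatever nvkm_memory the engine allocated its instance block from):

	nvkm_falcon_bind_context(falcon, inst);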
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1e138b3379558..e5830453813d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/subdev/bar/nv50.o
 nvkm-y += nvkm/subdev/bar/g84.o
 nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
+nvkm-y += nvkm/subdev/bar/gm107.o
+nvkm-y += nvkm/subdev/bar/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index c561d148cebcd..9646adec57cbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -30,39 +30,76 @@ nvkm_bar_flush(struct nvkm_bar *bar)
 		bar->func->flush(bar);
 }
 
-struct nvkm_vm *
-nvkm_bar_kmap(struct nvkm_bar *bar)
+struct nvkm_vmm *
+nvkm_bar_bar1_vmm(struct nvkm_device *device)
 {
-	/* disallow kmap() until after vm has been bootstrapped */
-	if (bar && bar->func->kmap && bar->subdev.oneinit)
-		return bar->func->kmap(bar);
+	return device->bar->func->bar1.vmm(device->bar);
+}
+
+struct nvkm_vmm *
+nvkm_bar_bar2_vmm(struct nvkm_device *device)
+{
+	/* Denies access to BAR2 when it's not initialised; used by INSTMEM
+	 * to know when object access needs to go through the BAR0 window.
+	 */
+	struct nvkm_bar *bar = device->bar;
+	if (bar && bar->bar2)
+		return bar->func->bar2.vmm(bar);
 	return NULL;
 }
 
-int
-nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+void
+nvkm_bar_bar2_fini(struct nvkm_device *device)
 {
-	return bar->func->umap(bar, size, type, vma);
+	struct nvkm_bar *bar = device->bar;
+	if (bar && bar->bar2) {
+		bar->func->bar2.fini(bar);
+		bar->bar2 = false;
+	}
+}
+
+void
+nvkm_bar_bar2_init(struct nvkm_device *device)
+{
+	struct nvkm_bar *bar = device->bar;
+	if (bar && bar->subdev.oneinit && !bar->bar2 && bar->func->bar2.init) {
+		bar->func->bar2.init(bar);
+		bar->func->bar2.wait(bar);
+		bar->bar2 = true;
+	}
 }
 
 static int
-nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_bar *bar = nvkm_bar(subdev);
-	return bar->func->oneinit(bar);
+	bar->func->bar1.fini(bar);
+	return 0;
 }
 
 static int
 nvkm_bar_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_bar *bar = nvkm_bar(subdev);
-	return bar->func->init(bar);
+	bar->func->bar1.init(bar);
+	bar->func->bar1.wait(bar);
+	if (bar->func->init)
+		bar->func->init(bar);
+	return 0;
+}
+
+static int
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->oneinit(bar);
 }
 
 static void *
 nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_bar *bar = nvkm_bar(subdev);
+	nvkm_bar_bar2_fini(subdev->device);
 	return bar->func->dtor(bar);
 }
 
@@ -71,6 +108,7 @@ nvkm_bar = {
 	.dtor = nvkm_bar_dtor,
 	.oneinit = nvkm_bar_oneinit,
 	.init = nvkm_bar_init,
+	.fini = nvkm_bar_fini,
 };
 
 void
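
The helpers above give INSTMEM a well-defined way to pick an access path. A
hedged sketch of the intended usage, assuming the caller falls back to the
BAR0 window whenever BAR2 is unavailable:

	struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(device);
	if (vmm) {
		/* BAR2 is up: access the object through its VMM. */
	} else {
		/* BAR2 down or absent: use the BAR0 window instead. */
	}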
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index ef717136c838c..87f26f54b4818 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -44,8 +44,14 @@ g84_bar_func = {
 	.dtor = nv50_bar_dtor,
 	.oneinit = nv50_bar_oneinit,
 	.init = nv50_bar_init,
-	.kmap = nv50_bar_kmap,
-	.umap = nv50_bar_umap,
+	.bar1.init = nv50_bar_bar1_init,
+	.bar1.fini = nv50_bar_bar1_fini,
+	.bar1.wait = nv50_bar_bar1_wait,
+	.bar1.vmm = nv50_bar_bar1_vmm,
+	.bar2.init = nv50_bar_bar2_init,
+	.bar2.fini = nv50_bar_bar2_fini,
+	.bar2.wait = nv50_bar_bar1_wait,
+	.bar2.vmm = nv50_bar_bar2_vmm,
 	.flush = g84_bar_flush,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 676c167c95b98..a3ba7f50198be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -23,39 +23,73 @@
  */
 #include "gf100.h"
 
-#include <core/gpuobj.h>
+#include <core/memory.h>
 #include <core/option.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 
-static struct nvkm_vm *
-gf100_bar_kmap(struct nvkm_bar *base)
+struct nvkm_vmm *
+gf100_bar_bar1_vmm(struct nvkm_bar *base)
 {
-	return gf100_bar(base)->bar[0].vm;
+	return gf100_bar(base)->bar[1].vmm;
 }
 
-int
-gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+void
+gf100_bar_bar1_wait(struct nvkm_bar *base)
+{
+	/* Unclear why this needs to be done twice. */
+	nvkm_bar_flush(base);
+	nvkm_bar_flush(base);
+}
+
+void
+gf100_bar_bar1_fini(struct nvkm_bar *bar)
 {
+	nvkm_mask(bar->subdev.device, 0x001704, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar1_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
 	struct gf100_bar *bar = gf100_bar(base);
-	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
+	const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
+}
+
+struct nvkm_vmm *
+gf100_bar_bar2_vmm(struct nvkm_bar *base)
+{
+	return gf100_bar(base)->bar[0].vmm;
+}
+
+void
+gf100_bar_bar2_fini(struct nvkm_bar *bar)
+{
+	nvkm_mask(bar->subdev.device, 0x001714, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar2_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
+	struct gf100_bar *bar = gf100_bar(base);
+	u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+	if (bar->bar2_halve)
+		addr |= 0x40000000;
+	nvkm_wr32(device, 0x001714, 0x80000000 | addr);
 }
 
 static int
-gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
-		  struct lock_class_key *key, int bar_nr)
+gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
+		      struct lock_class_key *key, int bar_nr)
 {
 	struct nvkm_device *device = bar->base.subdev.device;
-	struct nvkm_vm *vm;
 	resource_size_t bar_len;
 	int ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
-			      &bar_vm->mem);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
+			      &bar_vm->inst);
 	if (ret)
 		return ret;
 
@@ -63,98 +97,64 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
 	if (bar_nr == 3 && bar->bar2_halve)
 		bar_len >>= 1;
 
-	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
+	ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
+			   (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+	atomic_inc(&bar_vm->vmm->engref[NVKM_SUBDEV_BAR]);
+	bar_vm->vmm->debug = bar->base.subdev.debug;
 
 	/*
 	 * Bootstrap page table lookup.
 	 */
 	if (bar_nr == 3) {
-		ret = nvkm_vm_boot(vm, bar_len);
-		if (ret) {
-			nvkm_vm_ref(NULL, &vm, NULL);
+		ret = nvkm_vmm_boot(bar_vm->vmm);
+		if (ret)
 			return ret;
-		}
 	}
 
-	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
-	nvkm_vm_ref(NULL, &vm, NULL);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(bar_vm->mem);
-	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
-	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
-	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
-	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
-	nvkm_done(bar_vm->mem);
-	return 0;
+	return nvkm_vmm_join(bar_vm->vmm, bar_vm->inst);
 }
 
 int
 gf100_bar_oneinit(struct nvkm_bar *base)
 {
 	static struct lock_class_key bar1_lock;
-	static struct lock_class_key bar3_lock;
+	static struct lock_class_key bar2_lock;
 	struct gf100_bar *bar = gf100_bar(base);
 	int ret;
 
-	/* BAR3 */
-	if (bar->base.func->kmap) {
-		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
+	/* BAR2 */
+	if (bar->base.func->bar2.init) {
+		ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
 		if (ret)
 			return ret;
+
+		bar->base.subdev.oneinit = true;
+		nvkm_bar_bar2_init(bar->base.subdev.device);
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
+	ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-int
-gf100_bar_init(struct nvkm_bar *base)
-{
-	struct gf100_bar *bar = gf100_bar(base);
-	struct nvkm_device *device = bar->base.subdev.device;
-	u32 addr;
-
-	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
-	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
-
-	addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
-	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
-
-	if (bar->bar[0].mem) {
-		addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
-		if (bar->bar2_halve)
-			addr |= 0x40000000;
-		nvkm_wr32(device, 0x001714, 0x80000000 | addr);
-	}
-
-	return 0;
-}
-
 void *
 gf100_bar_dtor(struct nvkm_bar *base)
 {
 	struct gf100_bar *bar = gf100_bar(base);
 
-	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
-	nvkm_gpuobj_del(&bar->bar[1].pgd);
-	nvkm_memory_del(&bar->bar[1].mem);
+	nvkm_vmm_part(bar->bar[1].vmm, bar->bar[1].inst);
+	nvkm_vmm_unref(&bar->bar[1].vmm);
+	nvkm_memory_unref(&bar->bar[1].inst);
 
-	if (bar->bar[0].vm) {
-		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
-		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
-	}
-	nvkm_gpuobj_del(&bar->bar[0].pgd);
-	nvkm_memory_del(&bar->bar[0].mem);
+	nvkm_vmm_part(bar->bar[0].vmm, bar->bar[0].inst);
+	nvkm_vmm_unref(&bar->bar[0].vmm);
+	nvkm_memory_unref(&bar->bar[0].inst);
 	return bar;
 }
 
@@ -175,9 +175,14 @@ static const struct nvkm_bar_func
 gf100_bar_func = {
 	.dtor = gf100_bar_dtor,
 	.oneinit = gf100_bar_oneinit,
-	.init = gf100_bar_init,
-	.kmap = gf100_bar_kmap,
-	.umap = gf100_bar_umap,
+	.bar1.init = gf100_bar_bar1_init,
+	.bar1.fini = gf100_bar_bar1_fini,
+	.bar1.wait = gf100_bar_bar1_wait,
+	.bar1.vmm = gf100_bar_bar1_vmm,
+	.bar2.init = gf100_bar_bar2_init,
+	.bar2.fini = gf100_bar_bar2_fini,
+	.bar2.wait = gf100_bar_bar1_wait,
+	.bar2.vmm = gf100_bar_bar2_vmm,
 	.flush = g84_bar_flush,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 20a5255362baf..e4da39139e959 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -3,22 +3,24 @@
 #define gf100_bar(p) container_of((p), struct gf100_bar, base)
 #include "priv.h"
 
-struct gf100_bar_vm {
-	struct nvkm_memory *mem;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
+struct gf100_barN {
+	struct nvkm_memory *inst;
+	struct nvkm_vmm *vmm;
 };
 
 struct gf100_bar {
 	struct nvkm_bar base;
 	bool bar2_halve;
-	struct gf100_bar_vm bar[2];
+	struct gf100_barN bar[2];
 };
 
 int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
 		   int, struct nvkm_bar **);
 void *gf100_bar_dtor(struct nvkm_bar *);
 int gf100_bar_oneinit(struct nvkm_bar *);
-int gf100_bar_init(struct nvkm_bar *);
-int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void gf100_bar_bar1_init(struct nvkm_bar *);
+void gf100_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar1_vmm(struct nvkm_bar *);
+void gf100_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar2_vmm(struct nvkm_bar *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 9232fab4274c8..b10077d38839b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -25,8 +25,10 @@ static const struct nvkm_bar_func
 gk20a_bar_func = {
 	.dtor = gf100_bar_dtor,
 	.oneinit = gf100_bar_oneinit,
-	.init = gf100_bar_init,
-	.umap = gf100_bar_umap,
+	.bar1.init = gf100_bar_bar1_init,
+	.bar1.fini = gf100_bar_bar1_fini,
+	.bar1.wait = gf100_bar_bar1_wait,
+	.bar1.vmm = gf100_bar_bar1_vmm,
 	.flush = g84_bar_flush,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
new file mode 100644
index 0000000000000..3ddf9222d9351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/timer.h>
+
+void
+gm107_bar_bar1_wait(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001710) & 0x00000003))
+			break;
+	);
+}
+
+static void
+gm107_bar_bar2_wait(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001710) & 0x0000000c))
+			break;
+	);
+}
+
+static const struct nvkm_bar_func
+gm107_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.bar1.init = gf100_bar_bar1_init,
+	.bar1.fini = gf100_bar_bar1_fini,
+	.bar1.wait = gm107_bar_bar1_wait,
+	.bar1.vmm = gf100_bar_bar1_vmm,
+	.bar2.init = gf100_bar_bar2_init,
+	.bar2.fini = gf100_bar_bar2_fini,
+	.bar2.wait = gm107_bar_bar2_wait,
+	.bar2.vmm = gf100_bar_bar2_vmm,
+	.flush = g84_bar_flush,
+};
+
+int
+gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
new file mode 100644
index 0000000000000..950bff1955ad4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gm20b_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.bar1.init = gf100_bar_bar1_init,
+	.bar1.fini = gf100_bar_bar1_fini,
+	.bar1.wait = gm107_bar_bar1_wait,
+	.bar1.vmm = gf100_bar_bar1_vmm,
+	.flush = g84_bar_flush,
+};
+
+int
+gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+	if (ret == 0)
+		(*pbar)->iomap_uncached = true;
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 6eff637ac301a..157b076a12723 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -28,19 +28,6 @@
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
 
-struct nvkm_vm *
-nv50_bar_kmap(struct nvkm_bar *base)
-{
-	return nv50_bar(base)->bar3_vm;
-}
-
-int
-nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
-{
-	struct nv50_bar *bar = nv50_bar(base);
-	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
-}
-
 static void
 nv50_bar_flush(struct nvkm_bar *base)
 {
@@ -56,14 +43,72 @@ nv50_bar_flush(struct nvkm_bar *base)
 	spin_unlock_irqrestore(&bar->base.lock, flags);
 }
 
+struct nvkm_vmm *
+nv50_bar_bar1_vmm(struct nvkm_bar *base)
+{
+	return nv50_bar(base)->bar1_vmm;
+}
+
+void
+nv50_bar_bar1_wait(struct nvkm_bar *base)
+{
+	nvkm_bar_flush(base);
+}
+
+void
+nv50_bar_bar1_fini(struct nvkm_bar *bar)
+{
+	nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
+}
+
+void
+nv50_bar_bar1_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+}
+
+struct nvkm_vmm *
+nv50_bar_bar2_vmm(struct nvkm_bar *base)
+{
+	return nv50_bar(base)->bar2_vmm;
+}
+
+void
+nv50_bar_bar2_fini(struct nvkm_bar *bar)
+{
+	nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
+}
+
+void
+nv50_bar_bar2_init(struct nvkm_bar *base)
+{
+	struct nvkm_device *device = base->subdev.device;
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
+}
+
+void
+nv50_bar_init(struct nvkm_bar *base)
+{
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
+}
+
 int
 nv50_bar_oneinit(struct nvkm_bar *base)
 {
 	struct nv50_bar *bar = nv50_bar(base);
 	struct nvkm_device *device = bar->base.subdev.device;
 	static struct lock_class_key bar1_lock;
-	static struct lock_class_key bar3_lock;
-	struct nvkm_vm *vm;
+	static struct lock_class_key bar2_lock;
 	u64 start, limit;
 	int ret;
 
@@ -80,51 +125,54 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	if (ret)
 		return ret;
 
-	/* BAR3 */
+	/* BAR2 */
 	start = 0x0100000000ULL;
 	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
+	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+			   &bar2_lock, "bar2", &bar->bar2_vmm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+	atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
+	bar->bar2_vmm->debug = bar->base.subdev.debug;
 
-	ret = nvkm_vm_boot(vm, limit-- - start);
+	ret = nvkm_vmm_boot(bar->bar2_vmm);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
-	nvkm_vm_ref(NULL, &vm, NULL);
+	ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
 	if (ret)
 		return ret;
 
-	nvkm_kmap(bar->bar3);
-	nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
-	nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
-	nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
-	nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+	nvkm_kmap(bar->bar2);
+	nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
 				   upper_32_bits(start));
-	nvkm_wo32(bar->bar3, 0x10, 0x00000000);
-	nvkm_wo32(bar->bar3, 0x14, 0x00000000);
-	nvkm_done(bar->bar3);
+	nvkm_wo32(bar->bar2, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar2, 0x14, 0x00000000);
+	nvkm_done(bar->bar2);
+
+	bar->base.subdev.oneinit = true;
+	nvkm_bar_bar2_init(device);
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
 	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
-	if (ret)
-		return ret;
+	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+			   &bar1_lock, "bar1", &bar->bar1_vmm);
+	if (ret)
+		return ret;
 
-	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+	atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
+	bar->bar1_vmm->debug = bar->base.subdev.debug;
 
-	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
-	nvkm_vm_ref(NULL, &vm, NULL);
+	ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
 	if (ret)
 		return ret;
 
@@ -144,45 +192,21 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	return 0;
 }
 
-int
-nv50_bar_init(struct nvkm_bar *base)
-{
-	struct nv50_bar *bar = nv50_bar(base);
-	struct nvkm_device *device = bar->base.subdev.device;
-	int i;
-
-	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
-	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
-	nvkm_wr32(device, 0x100c80, 0x00060001);
-	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
-			break;
-	) < 0)
-		return -EBUSY;
-
-	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
-	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
-	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
-	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
-	return 0;
-}
-
 void *
 nv50_bar_dtor(struct nvkm_bar *base)
 {
 	struct nv50_bar *bar = nv50_bar(base);
-	nvkm_gpuobj_del(&bar->bar1);
-	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
-	nvkm_gpuobj_del(&bar->bar3);
-	if (bar->bar3_vm) {
-		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
-		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+	if (bar->mem) {
+		nvkm_gpuobj_del(&bar->bar1);
+		nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
+		nvkm_vmm_unref(&bar->bar1_vmm);
+		nvkm_gpuobj_del(&bar->bar2);
+		nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
+		nvkm_vmm_unref(&bar->bar2_vmm);
+		nvkm_gpuobj_del(&bar->pgd);
+		nvkm_gpuobj_del(&bar->pad);
+		nvkm_gpuobj_del(&bar->mem);
 	}
-	nvkm_gpuobj_del(&bar->pgd);
-	nvkm_gpuobj_del(&bar->pad);
-	nvkm_gpuobj_del(&bar->mem);
 	return bar;
 }
 
@@ -204,8 +228,14 @@ nv50_bar_func = {
 	.dtor = nv50_bar_dtor,
 	.oneinit = nv50_bar_oneinit,
 	.init = nv50_bar_init,
-	.kmap = nv50_bar_kmap,
-	.umap = nv50_bar_umap,
+	.bar1.init = nv50_bar_bar1_init,
+	.bar1.fini = nv50_bar_bar1_fini,
+	.bar1.wait = nv50_bar_bar1_wait,
+	.bar1.vmm = nv50_bar_bar1_vmm,
+	.bar2.init = nv50_bar_bar2_init,
+	.bar2.fini = nv50_bar_bar2_fini,
+	.bar2.wait = nv50_bar_bar1_wait,
+	.bar2.vmm = nv50_bar_bar2_vmm,
 	.flush = nv50_bar_flush,
 };
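
Note the ordering in nv50_bar_oneinit() above: subdev.oneinit is flipped and
nvkm_bar_bar2_init() is called as soon as the BAR2 objects exist, so the
BAR1 channel gpuobj written immediately afterwards can already be reached
through BAR2:

	/* From the hunk above: unblock and program BAR2 early. */
	bar->base.subdev.oneinit = true;
	nvkm_bar_bar2_init(device);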
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index 1eb764f22a499..140b76f588b6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -9,18 +9,20 @@ struct nv50_bar {
 	struct nvkm_gpuobj *mem;
 	struct nvkm_gpuobj *pad;
 	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *bar1_vm;
+	struct nvkm_vmm *bar1_vmm;
 	struct nvkm_gpuobj *bar1;
-	struct nvkm_vm *bar3_vm;
-	struct nvkm_gpuobj *bar3;
+	struct nvkm_vmm *bar2_vmm;
+	struct nvkm_gpuobj *bar2;
 };
 
 int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
 		  int, u32 pgd_addr, struct nvkm_bar **);
 void *nv50_bar_dtor(struct nvkm_bar *);
 int nv50_bar_oneinit(struct nvkm_bar *);
-int nv50_bar_init(struct nvkm_bar *);
-struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
-int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
-void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+void nv50_bar_init(struct nvkm_bar *);
+void nv50_bar_bar1_init(struct nvkm_bar *);
+void nv50_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar1_vmm(struct nvkm_bar *);
+void nv50_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar2_vmm(struct nvkm_bar *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index d834ef20db5bb..14398e2dbdf99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -9,11 +9,25 @@ void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
 struct nvkm_bar_func {
 	void *(*dtor)(struct nvkm_bar *);
 	int (*oneinit)(struct nvkm_bar *);
-	int (*init)(struct nvkm_bar *);
-	struct nvkm_vm *(*kmap)(struct nvkm_bar *);
-	int  (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+	void (*init)(struct nvkm_bar *);
+
+	struct {
+		void (*init)(struct nvkm_bar *);
+		void (*fini)(struct nvkm_bar *);
+		void (*wait)(struct nvkm_bar *);
+		struct nvkm_vmm *(*vmm)(struct nvkm_bar *);
+	} bar1, bar2;
+
 	void (*flush)(struct nvkm_bar *);
 };
 
+void nv50_bar_bar1_fini(struct nvkm_bar *);
+void nv50_bar_bar2_fini(struct nvkm_bar *);
+
 void g84_bar_flush(struct nvkm_bar *);
+
+void gf100_bar_bar1_fini(struct nvkm_bar *);
+void gf100_bar_bar2_fini(struct nvkm_bar *);
+
+void gm107_bar_bar1_wait(struct nvkm_bar *);
 #endif
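
A chipset implementation now fills in an init/fini/wait/vmm quartet per BAR
instead of the old kmap()/umap() hooks, and leaves a BAR it lacks (BAR2 on
gm20b, for instance) entirely NULL. A sketch for a hypothetical chipset
("gxxx") built from the generic helpers declared here:

	static const struct nvkm_bar_func
	gxxx_bar_func = {
		.dtor = gf100_bar_dtor,
		.oneinit = gf100_bar_oneinit,
		.bar1.init = gf100_bar_bar1_init,
		.bar1.fini = gf100_bar_bar1_fini,
		.bar1.wait = gm107_bar_bar1_wait,
		.bar1.vmm = gf100_bar_bar1_vmm,
		.flush = g84_bar_flush,
	};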
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 23caef8df17fb..73e463ed55c35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -99,7 +99,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
 			rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
 			res_start = 0x5;
 			break;
-		};
+		}
 
 		if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
 			continue;
@@ -115,7 +115,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
 		default:
 			rail->resistor_count = 0;
 			break;
-		};
+		}
 
 		for (r = 0; r < rail->resistor_count; ++r) {
 			rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index b58ee99f7bfc8..9cc10e438b3de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -36,6 +36,8 @@
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
 
+#include <linux/kernel.h>
+
 #define bioslog(lvl, fmt, args...) do {                                        \
 	nvkm_printk(init->subdev, lvl, info, "0x%08x[%c]: "fmt,                \
 		    init->offset, init_exec(init) ?                            \
@@ -2271,8 +2273,6 @@ static struct nvbios_init_opcode {
 	[0xaa] = { init_reserved },
 };
 
-#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
-
 int
 nvbios_exec(struct nvbios_init *init)
 {
@@ -2281,7 +2281,8 @@ nvbios_exec(struct nvbios_init *init)
 	init->nested++;
 	while (init->offset) {
 		u8 opcode = nvbios_rd08(bios, init->offset);
-		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
+		if (opcode >= ARRAY_SIZE(init_opcode) ||
+		    !init_opcode[opcode].exec) {
 			error("unknown opcode 0x%02x\n", opcode);
 			return -EINVAL;
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index a7049c041594b..73b5d46104bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -31,12 +31,6 @@
 #include <engine/gr.h>
 #include <engine/mpeg.h>
 
-bool
-nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
-	return fb->func->memtype_valid(fb, memtype);
-}
-
 void
 nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
 {
@@ -100,6 +94,7 @@ static int
 nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_fb *fb = nvkm_fb(subdev);
+	u32 tags = 0;
 
 	if (fb->func->ram_new) {
 		int ret = fb->func->ram_new(fb, &fb->ram);
@@ -115,7 +110,16 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 			return ret;
 	}
 
-	return 0;
+	/* Initialise compression tag allocator.
+	 *
+	 * LTC oneinit() will override this on Fermi and newer.
+	 */
+	if (fb->func->tags) {
+		tags = fb->func->tags(fb);
+		nvkm_debug(subdev, "%d comptags\n", tags);
+	}
+
+	return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
 }
 
 static int
@@ -135,8 +139,13 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 
 	if (fb->func->init)
 		fb->func->init(fb);
-	if (fb->func->init_page)
-		fb->func->init_page(fb);
+
+	if (fb->func->init_page) {
+		ret = fb->func->init_page(fb);
+		if (WARN_ON(ret))
+			return ret;
+	}
+
 	if (fb->func->init_unkn)
 		fb->func->init_unkn(fb);
 	return 0;
@@ -148,12 +157,13 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
 	struct nvkm_fb *fb = nvkm_fb(subdev);
 	int i;
 
-	nvkm_memory_del(&fb->mmu_wr);
-	nvkm_memory_del(&fb->mmu_rd);
+	nvkm_memory_unref(&fb->mmu_wr);
+	nvkm_memory_unref(&fb->mmu_rd);
 
 	for (i = 0; i < fb->tile.regions; i++)
 		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
 
+	nvkm_mm_fini(&fb->tags);
 	nvkm_ram_del(&fb->ram);
 
 	if (fb->func->dtor)
@@ -176,7 +186,8 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
 	fb->func = func;
 	fb->tile.regions = fb->func->tile.regions;
-	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
+	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
+				fb->func->default_bigpage);
 }
 
 int
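
[annotation] The oneinit() change above moves comptag ownership from nvkm_ram into nvkm_fb: the heap is sized from the optional .tags hook, and a chipset without one gets a zero-length fb->tags from which every nvkm_mm_head() call fails cleanly, so no tile region ever gets compression. A runnable model of that sizing flow, with a deliberately trivial allocator standing in for nvkm_mm:

    #include <stdio.h>

    struct mm { unsigned int next, total; };

    static void mm_init(struct mm *mm, unsigned int total)
    {
        mm->next = 0;
        mm->total = total;
    }

    /* Linear stand-in for nvkm_mm_head(): no free list needed here. */
    static int mm_alloc(struct mm *mm, unsigned int size, unsigned int *off)
    {
        if (mm->next + size > mm->total)
            return -1;          /* zero-sized heap always lands here */
        *off = mm->next;
        mm->next += size;
        return 0;
    }

    struct fb_func { unsigned int (*tags)(void); };

    static unsigned int probe_tags(void) { return 0x8000; }

    static const struct fb_func with_tags = { .tags = probe_tags };
    static const struct fb_func no_tags;    /* hook left NULL, nv04-style */

    static void fb_oneinit(const struct fb_func *func, struct mm *tags)
    {
        unsigned int n = func->tags ? func->tags() : 0;
        mm_init(tags, n);
        printf("%u comptags\n", n);
    }

    int main(void)
    {
        struct mm tags;
        unsigned int off;

        fb_oneinit(&with_tags, &tags);
        printf("alloc: %d\n", mm_alloc(&tags, 0x40, &off));  /* 0 */
        fb_oneinit(&no_tags, &tags);
        printf("alloc: %d\n", mm_alloc(&tags, 0x40, &off));  /* -1 */
        return 0;
    }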
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 9c28392d07e43..06bf95c0c5493 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -27,6 +27,7 @@
 static const struct nv50_fb_func
 g84_fb = {
 	.ram_new = nv50_ram_new,
+	.tags = nv20_fb_tags,
 	.trap = 0x001d07ff,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index a239e73562c81..47d28c2797078 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -27,15 +27,6 @@
 #include <core/memory.h>
 #include <core/option.h>
 
-extern const u8 gf100_pte_storage_type_map[256];
-
-bool
-gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
-	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
-	return likely((gf100_pte_storage_type_map[memtype] != 0xff));
-}
-
 void
 gf100_fb_intr(struct nvkm_fb *base)
 {
@@ -80,20 +71,17 @@ gf100_fb_oneinit(struct nvkm_fb *base)
 	return 0;
 }
 
-void
+int
 gf100_fb_init_page(struct nvkm_fb *fb)
 {
 	struct nvkm_device *device = fb->subdev.device;
 	switch (fb->page) {
-	case 16:
-		nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
-		break;
-	case 17:
+	case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
+	case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
 	default:
-		nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
-		fb->page = 17;
-		break;
+		return -EINVAL;
 	}
+	return 0;
 }
 
 void
@@ -143,7 +131,7 @@ gf100_fb = {
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
 	.ram_new = gf100_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 17,
 };
 
 int
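
[annotation] Making init_page() return int changes its contract: an unsupported fb->page value (say, a bad NvFbBigPage override) used to be silently rewritten to a hardwired default, and now fails device init with -EINVAL behind the WARN_ON() added to nvkm_fb_init(), while the sane default lives in .default_bigpage instead. A compact, runnable model of the new contract, with stand-ins for the register and WARN_ON():

    #include <stdio.h>

    #define EINVAL 22

    static unsigned int reg_100c80;     /* stand-in for the hw register */

    static int warn_on(int cond)        /* poor man's WARN_ON() */
    {
        if (cond)
            fprintf(stderr, "WARN\n");
        return cond;
    }

    static int init_page(int page)
    {
        switch (page) {
        case 16: reg_100c80 |=  1u; break;  /* 64KiB big pages */
        case 17: reg_100c80 &= ~1u; break;  /* 128KiB big pages */
        default:
            return -EINVAL;     /* no silent fixup of the value now */
        }
        return 0;
    }

    static int fb_init(int page)
    {
        int ret = init_page(page);
        if (warn_on(ret))       /* a bad override fails init loudly */
            return ret;
        printf("page=%d ok, reg=0x%x\n", page, reg_100c80);
        return 0;
    }

    int main(void)
    {
        fb_init(17);    /* gf100's default_bigpage value */
        fb_init(12);    /* bogus override: WARN + -EINVAL */
        return 0;
    }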
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 412eb89834e8f..e3cf0515bb70a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -17,7 +17,5 @@ void gf100_fb_intr(struct nvkm_fb *);
 
 void gp100_fb_init(struct nvkm_fb *);
 
-void gm200_fb_init_page(struct nvkm_fb *fb);
 void gm200_fb_init(struct nvkm_fb *base);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 56af84aa333b0..4a9f463745b53 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -32,7 +32,7 @@ gf108_fb = {
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
 	.ram_new = gf108_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 17,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 4245e2e6e6045..0a6e8eaad42ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -32,7 +32,7 @@ gk104_fb = {
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
 	.ram_new = gk104_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 17,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 5d34d6136616a..a7e29b1250941 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -30,7 +30,7 @@ gk20a_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 17,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index db699025f5464..69c876d5d1c11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -32,7 +32,7 @@ gm107_fb = {
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
 	.ram_new = gm107_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 17,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d83da5ddbc1e8..8137e19d32923 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,22 +26,18 @@
 
 #include <core/memory.h>
 
-void
+int
 gm200_fb_init_page(struct nvkm_fb *fb)
 {
 	struct nvkm_device *device = fb->subdev.device;
 	switch (fb->page) {
-	case 16:
-		nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
-		break;
-	case 17:
-		nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
-		break;
+	case 16: nvkm_mask(device, 0x100c80, 0x00001801, 0x00001001); break;
+	case 17: nvkm_mask(device, 0x100c80, 0x00001801, 0x00000000); break;
+	case  0: nvkm_mask(device, 0x100c80, 0x00001800, 0x00001800); break;
 	default:
-		nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
-		fb->page = 0;
-		break;
+		return -EINVAL;
 	}
+	return 0;
 }
 
 void
@@ -69,7 +65,7 @@ gm200_fb = {
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
 	.ram_new = gm200_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 0 /* per-instance. */,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index b87c233bcd6db..12db61e31128c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -30,7 +30,7 @@ gm20b_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
-	.memtype_valid = gf100_fb_memtype_valid,
+	.default_bigpage = 0 /* per-instance. */,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 98474aec19216..147f69b30cd82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -59,7 +59,6 @@ gp100_fb = {
 	.init_page = gm200_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
 	.ram_new = gp100_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 73b4ae1c73dcf..b84b9861ef269 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -33,7 +33,6 @@ gp102_fb = {
 	.init = gp100_fb_init,
 	.init_page = gm200_fb_init_page,
 	.ram_new = gp100_ram_new,
-	.memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index f2b1fbf428d51..af8e43979dc11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -28,7 +28,6 @@ gp10b_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
-	.memtype_valid = gf100_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index ebb30608d5efa..9266559b45f97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -27,6 +27,7 @@
 static const struct nv50_fb_func
 gt215_fb = {
 	.ram_new = gt215_ram_new,
+	.tags = nv20_fb_tags,
 	.trap = 0x000d0fff,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index 8ff2e5db4571c..c886664533c81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -25,14 +25,6 @@
 #include "ram.h"
 #include "regsnv04.h"
 
-bool
-nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
-	if (!(tile_flags & 0xff00))
-		return true;
-	return false;
-}
-
 static void
 nv04_fb_init(struct nvkm_fb *fb)
 {
@@ -49,7 +41,6 @@ static const struct nvkm_fb_func
 nv04_fb = {
 	.init = nv04_fb_init,
 	.ram_new = nv04_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index e8c44f5a3d845..c998b7e96aa3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -61,7 +61,6 @@ nv10_fb = {
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
 	.ram_new = nv10_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 2ae0beb87567d..7b9f04f44af89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -33,7 +33,6 @@ nv1a_fb = {
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
 	.ram_new = nv1a_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index 126865dfe7773..a021d21ff153a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
 		else              tile->zcomp = 0x04000000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 	tile->limit = 0;
 	tile->pitch = 0;
 	tile->zcomp = 0;
-	nvkm_mm_free(&fb->ram->tags, &tile->tag);
+	nvkm_mm_free(&fb->tags, &tile->tag);
 }
 
 void
@@ -77,15 +77,22 @@ nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 	nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
+u32
+nv20_fb_tags(struct nvkm_fb *fb)
+{
+	const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320);
+	return tags ? tags + 1 : 0;
+}
+
 static const struct nvkm_fb_func
 nv20_fb = {
+	.tags = nv20_fb_tags,
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv20_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv20_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
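
[annotation] nv20_fb_tags() keeps the semantics of the `tags ? ++tags : 0` expression it replaces in nvkm_ram_ctor(): register 0x100320 apparently reports the highest valid tag index rather than a count, so a non-zero read becomes index + 1 tags, while zero still means no comptag memory at all. In miniature:

    #include <assert.h>

    static unsigned int tags_from_reg(unsigned int reg)
    {
        /* reg holds the last valid tag index; 0 means "none". */
        return reg ? reg + 1 : 0;
    }

    int main(void)
    {
        assert(tags_from_reg(0x0000) == 0x0000);
        assert(tags_from_reg(0x7fff) == 0x8000);    /* 32768 tags */
        return 0;
    }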
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index c56746d2a502c..7709f5fe9a458 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
 		else              tile->zcomp = 0x00200000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -44,13 +44,13 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 
 static const struct nvkm_fb_func
 nv25_fb = {
+	.tags = nv20_fb_tags,
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv25_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv20_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 2a7c4831b8217..8aa7826665079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
 		else           tile->zcomp |= 0x02000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -116,6 +116,7 @@ nv30_fb_init(struct nvkm_fb *fb)
 
 static const struct nvkm_fb_func
 nv30_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
@@ -123,7 +124,6 @@ nv30_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv20_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 1604b3789ad16..6e83dcff72e08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
 		else           tile->zcomp |= 0x08000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -45,6 +45,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 
 static const struct nvkm_fb_func
 nv35_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv35_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv20_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 80cc0a6e3416d..2a07617bb44cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
 		else           tile->zcomp |= 0x20000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -45,6 +45,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 
 static const struct nvkm_fb_func
 nv36_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv36_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv20_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index deec46a310f8f..955160778b5b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 	u32 tiles = DIV_ROUND_UP(size, 0x80);
 	u32 tags  = round_up(tiles / fb->ram->parts, 0x100);
 	if ( (flags & 2) &&
-	    !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	    !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
 		tile->zcomp |= ((tile->tag->offset           ) >> 8);
 		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -51,6 +51,7 @@ nv40_fb_init(struct nvkm_fb *fb)
 
 static const struct nvkm_fb_func
 nv40_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv40_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
@@ -58,7 +59,6 @@ nv40_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
 	.ram_new = nv40_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index 79e57dd5a00f4..b77f08d34cc35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -45,6 +45,7 @@ nv41_fb_init(struct nvkm_fb *fb)
 
 static const struct nvkm_fb_func
 nv41_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv41_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv41_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
 	.ram_new = nv41_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 06246cce5ec45..b59dc486083de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -62,7 +62,6 @@ nv44_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
 	.ram_new = nv44_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 3598a1aa65beb..cab7d20fa0398 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -48,7 +48,6 @@ nv46_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
 	.ram_new = nv44_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index c505e44293142..a8b0ad4c871db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -28,6 +28,7 @@
 
 static const struct nvkm_fb_func
 nv47_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv47_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
 	.ram_new = nv41_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 7b91b9f170e5b..d0b317bb0252e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -28,6 +28,7 @@
 
 static const struct nvkm_fb_func
 nv49_fb = {
+	.tags = nv20_fb_tags,
 	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv49_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
 	.ram_new = nv49_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 4e98210c1b1c7..6a6f0c0860713 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -34,7 +34,6 @@ nv4e_fb = {
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
 	.ram_new = nv44_ram_new,
-	.memtype_valid = nv04_fb_memtype_valid,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0595e0722bfcc..b2f5bf8144eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -28,18 +28,6 @@
 #include <core/enum.h>
 #include <engine/fifo.h>
 
-int
-nv50_fb_memtype[0x80] = {
-	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
-	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
-	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
 static int
 nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
 {
@@ -47,12 +35,6 @@ nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
 	return fb->func->ram_new(&fb->base, pram);
 }
 
-static bool
-nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
-	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
-}
-
 static const struct nvkm_enum vm_dispatch_subclients[] = {
 	{ 0x00000000, "GRCTX" },
 	{ 0x00000001, "NOTIFY" },
@@ -244,6 +226,15 @@ nv50_fb_init(struct nvkm_fb *base)
 	nvkm_wr32(device, 0x100c90, fb->func->trap);
 }
 
+static u32
+nv50_fb_tags(struct nvkm_fb *base)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	if (fb->func->tags)
+		return fb->func->tags(&fb->base);
+	return 0;
+}
+
 static void *
 nv50_fb_dtor(struct nvkm_fb *base)
 {
@@ -262,11 +253,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
 static const struct nvkm_fb_func
 nv50_fb_ = {
 	.dtor = nv50_fb_dtor,
+	.tags = nv50_fb_tags,
 	.oneinit = nv50_fb_oneinit,
 	.init = nv50_fb_init,
 	.intr = nv50_fb_intr,
 	.ram_new = nv50_fb_ram_new,
-	.memtype_valid = nv50_fb_memtype_valid,
 };
 
 int
@@ -287,6 +278,7 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
 static const struct nv50_fb_func
 nv50_fb = {
 	.ram_new = nv50_ram_new,
+	.tags = nv20_fb_tags,
 	.trap = 0x000707ff,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index faa88c8c66fe6..13231d4b00d93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -12,10 +12,10 @@ struct nv50_fb {
 
 struct nv50_fb_func {
 	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
+	u32 (*tags)(struct nvkm_fb *);
 	u32 trap;
 };
 
 int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
 		 struct nvkm_fb **pfb);
-extern int nv50_fb_memtype[0x80];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index e905d44fa1d59..e05d95240e853 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -6,9 +6,10 @@ struct nvkm_bios;
 
 struct nvkm_fb_func {
 	void *(*dtor)(struct nvkm_fb *);
+	u32 (*tags)(struct nvkm_fb *);
 	int (*oneinit)(struct nvkm_fb *);
 	void (*init)(struct nvkm_fb *);
-	void (*init_page)(struct nvkm_fb *);
+	int (*init_page)(struct nvkm_fb *);
 	void (*init_unkn)(struct nvkm_fb *);
 	void (*intr)(struct nvkm_fb *);
 
@@ -24,7 +25,7 @@ struct nvkm_fb_func {
 
 	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
-	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+	u8 default_bigpage;
 };
 
 void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
@@ -33,13 +34,12 @@ int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
 		 int index, struct nvkm_fb **);
 int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-
 void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
 void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
 
+u32 nv20_fb_tags(struct nvkm_fb *);
 void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
@@ -62,8 +62,7 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 
 int gf100_fb_oneinit(struct nvkm_fb *);
-void gf100_fb_init_page(struct nvkm_fb *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+int gf100_fb_init_page(struct nvkm_fb *);
 
-void gm200_fb_init_page(struct nvkm_fb *);
+int gm200_fb_init_page(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index c17d559dbfbe3..24c7bd5057316 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -21,8 +21,132 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
+#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
 #include "ram.h"
 
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+struct nvkm_vram {
+	struct nvkm_memory memory;
+	struct nvkm_ram *ram;
+	u8 page;
+	struct nvkm_mm_node *mn;
+};
+
+static int
+nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+	      struct nvkm_vma *vma, void *argv, u32 argc)
+{
+	struct nvkm_vram *vram = nvkm_vram(memory);
+	struct nvkm_vmm_map map = {
+		.memory = &vram->memory,
+		.offset = offset,
+		.mem = vram->mn,
+	};
+
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static u64
+nvkm_vram_size(struct nvkm_memory *memory)
+{
+	return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u64
+nvkm_vram_addr(struct nvkm_memory *memory)
+{
+	struct nvkm_vram *vram = nvkm_vram(memory);
+	if (!nvkm_mm_contiguous(vram->mn))
+		return ~0ULL;
+	return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u8
+nvkm_vram_page(struct nvkm_memory *memory)
+{
+	return nvkm_vram(memory)->page;
+}
+
+static enum nvkm_memory_target
+nvkm_vram_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_VRAM;
+}
+
+static void *
+nvkm_vram_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_vram *vram = nvkm_vram(memory);
+	struct nvkm_mm_node *next = vram->mn;
+	struct nvkm_mm_node *node;
+	mutex_lock(&vram->ram->fb->subdev.mutex);
+	while ((node = next)) {
+		next = node->next;
+		nvkm_mm_free(&vram->ram->vram, &node);
+	}
+	mutex_unlock(&vram->ram->fb->subdev.mutex);
+	return vram;
+}
+
+static const struct nvkm_memory_func
+nvkm_vram = {
+	.dtor = nvkm_vram_dtor,
+	.target = nvkm_vram_target,
+	.page = nvkm_vram_page,
+	.addr = nvkm_vram_addr,
+	.size = nvkm_vram_size,
+	.map = nvkm_vram_map,
+};
+
+int
+nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
+	     bool contig, bool back, struct nvkm_memory **pmemory)
+{
+	struct nvkm_ram *ram;
+	struct nvkm_mm *mm;
+	struct nvkm_mm_node **node, *r;
+	struct nvkm_vram *vram;
+	u8   page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
+	u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
+	u32   max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
+	u32   min = contig ? max : align;
+	int ret;
+
+	if (!device->fb || !(ram = device->fb->ram))
+		return -ENODEV;
+
+	mm = &ram->vram;
+
+	if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+	vram->ram = ram;
+	vram->page = page;
+	*pmemory = &vram->memory;
+
+	mutex_lock(&ram->fb->subdev.mutex);
+	node = &vram->mn;
+	do {
+		if (back)
+			ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
+		else
+			ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
+		if (ret) {
+			mutex_unlock(&ram->fb->subdev.mutex);
+			nvkm_memory_unref(pmemory);
+			return ret;
+		}
+
+		*node = r;
+		node = &r->next;
+		max -= r->length;
+	} while (max);
+	mutex_unlock(&ram->fb->subdev.mutex);
+	return 0;
+}
+
 int
 nvkm_ram_init(struct nvkm_ram *ram)
 {
@@ -38,7 +162,6 @@ nvkm_ram_del(struct nvkm_ram **pram)
 	if (ram && !WARN_ON(!ram->func)) {
 		if (ram->func->dtor)
 			*pram = ram->func->dtor(ram);
-		nvkm_mm_fini(&ram->tags);
 		nvkm_mm_fini(&ram->vram);
 		kfree(*pram);
 		*pram = NULL;
@@ -47,8 +170,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
 
 int
 nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
-	      enum nvkm_ram_type type, u64 size, u32 tags,
-	      struct nvkm_ram *ram)
+	      enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
 {
 	static const char *name[] = {
 		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
@@ -73,28 +195,20 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	ram->size = size;
 
 	if (!nvkm_mm_initialised(&ram->vram)) {
-		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
+				   size >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	}
 
-	if (!nvkm_mm_initialised(&ram->tags)) {
-		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
-		if (ret)
-			return ret;
-
-		nvkm_debug(subdev, "%d compression tags\n", tags);
-	}
-
 	return 0;
 }
 
 int
 nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
-	      enum nvkm_ram_type type, u64 size, u32 tags,
-	      struct nvkm_ram **pram)
+	      enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
 {
 	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+	return nvkm_ram_ctor(func, fb, type, size, *pram);
 }
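
[annotation] nvkm_ram_get() is the single front end that replaces the per-chipset .get/.put hooks: the allocation becomes a refcounted nvkm_memory object, and the request is satisfied from however many nvkm_mm nodes it takes, chained through ->next. Contiguous callers get min == max, so anything short of a single node fails. A runnable model of just the chaining loop, with a fake backing allocator that fragments on purpose:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { unsigned int offset, length; struct node *next; };

    static unsigned int pos;    /* fake backing store cursor */

    /* Never hands out more than 4 units at once, forcing multi-node. */
    static int mm_head(unsigned int max, unsigned int min, struct node **pr)
    {
        unsigned int len = max > 4 ? 4 : max;
        struct node *r;

        if (len < min)
            return -1;          /* can't meet a contiguous request */
        if (!(r = calloc(1, sizeof(*r))))
            return -1;
        r->offset = pos;
        r->length = len;
        pos += len;
        *pr = r;
        return 0;
    }

    static int vram_get(unsigned int size, int contig, struct node **pmn)
    {
        unsigned int max = size, min = contig ? max : 1;
        struct node **node = pmn, *r;

        do {    /* same shape as the loop in nvkm_ram_get() */
            if (mm_head(max, min, &r))
                return -1;      /* real code unrefs the partial chain */
            *node = r;          /* append, then advance the link */
            node = &r->next;
            max -= r->length;
        } while (max);
        return 0;
    }

    int main(void)
    {
        struct node *mn = NULL, *n;

        if (vram_get(10, 0, &mn) == 0)
            for (n = mn; n; n = n->next)
                printf("node @%u len %u\n", n->offset, n->length);
        printf("contig x10: %d\n", vram_get(10, 1, &mn));   /* -1 */
        return 0;
    }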
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index fac7e73c3ddfb..70fd59dcd06d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -3,11 +3,9 @@
 #include "priv.h"
 
 int  nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
-		   enum nvkm_ram_type, u64 size, u32 tags,
-		   struct nvkm_ram *);
+		   enum nvkm_ram_type, u64 size, struct nvkm_ram *);
 int  nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
-		   enum nvkm_ram_type, u64 size, u32 tags,
-		   struct nvkm_ram **);
+		   enum nvkm_ram_type, u64 size, struct nvkm_ram **);
 void nvkm_ram_del(struct nvkm_ram **);
 int  nvkm_ram_init(struct nvkm_ram *);
 
@@ -15,9 +13,6 @@ extern const struct nvkm_ram_func nv04_ram_func;
 
 int  nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
 		   struct nvkm_ram *);
-int  nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
 
 int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
 		   struct nvkm_ram **);
@@ -28,8 +23,6 @@ u32  gf100_ram_probe_fbp(const struct nvkm_ram_func *,
 u32  gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
 				struct nvkm_device *, int, int *);
 u32  gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
-int  gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
 int gf100_ram_init(struct nvkm_ram *);
 int gf100_ram_calc(struct nvkm_ram *, u32);
 int gf100_ram_prog(struct nvkm_ram *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 4a9bd4f1cb931..ac87a3b6b7c9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -32,7 +32,6 @@
 #include <subdev/bios/timing.h>
 #include <subdev/clk.h>
 #include <subdev/clk/pll.h>
-#include <subdev/ltc.h>
 
 struct gf100_ramfuc {
 	struct ramfuc base;
@@ -420,86 +419,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
 	ram_exec(&ram->fuc, false);
 }
 
-void
-gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
-	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
-	struct nvkm_mem *mem = *pmem;
-
-	*pmem = NULL;
-	if (unlikely(mem == NULL))
-		return;
-
-	mutex_lock(&ram->fb->subdev.mutex);
-	if (mem->tag)
-		nvkm_ltc_tags_free(ltc, &mem->tag);
-	__nv50_ram_put(ram, mem);
-	mutex_unlock(&ram->fb->subdev.mutex);
-
-	kfree(mem);
-}
-
-int
-gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
-	      u32 memtype, struct nvkm_mem **pmem)
-{
-	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
-	struct nvkm_mm *mm = &ram->vram;
-	struct nvkm_mm_node **node, *r;
-	struct nvkm_mem *mem;
-	int type = (memtype & 0x0ff);
-	int back = (memtype & 0x800);
-	const bool comp = gf100_pte_storage_type_map[type] != type;
-	int ret;
-
-	size  >>= NVKM_RAM_MM_SHIFT;
-	align >>= NVKM_RAM_MM_SHIFT;
-	ncmin >>= NVKM_RAM_MM_SHIFT;
-	if (!ncmin)
-		ncmin = size;
-
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	mem->size = size;
-
-	mutex_lock(&ram->fb->subdev.mutex);
-	if (comp) {
-		/* compression only works with lpages */
-		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
-			int n = size >> 5;
-			nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
-		}
-
-		if (unlikely(!mem->tag))
-			type = gf100_pte_storage_type_map[type];
-	}
-	mem->memtype = type;
-
-	node = &mem->mem;
-	do {
-		if (back)
-			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
-		else
-			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
-		if (ret) {
-			mutex_unlock(&ram->fb->subdev.mutex);
-			ram->func->put(ram, &mem);
-			return ret;
-		}
-
-		*node = r;
-		node = &r->next;
-		size -= r->length;
-	} while (size);
-	mutex_unlock(&ram->fb->subdev.mutex);
-
-	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
-	*pmem = mem;
-	return 0;
-}
-
 int
 gf100_ram_init(struct nvkm_ram *base)
 {
@@ -604,7 +523,7 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
 	nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
 
-	ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
+	ret = nvkm_ram_ctor(func, fb, type, total, ram);
 	if (ret)
 		return ret;
 
@@ -617,7 +536,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	 */
 	if (lower != total) {
 		/* The common memory amount is addressed normally. */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
@@ -625,13 +545,15 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 		/* And the rest is much higher in the physical address
 		 * space, and may not be usable for certain operations.
 		 */
-		ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
+				   ubase >> NVKM_RAM_MM_SHIFT,
 				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	} else {
 		/* GPUs without mixed-memory are a lot nicer... */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (total - rsvd_head - rsvd_tail) >>
 				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
@@ -738,8 +660,6 @@ gf100_ram = {
 	.probe_fbp_amount = gf100_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
 	.init = gf100_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 	.calc = gf100_ram_calc,
 	.prog = gf100_ram_prog,
 	.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 985ec64cf369b..70a06e3cd55a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -48,8 +48,6 @@ gf108_ram = {
 	.probe_fbp_amount = gf108_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
 	.init = gf100_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 	.calc = gf100_ram_calc,
 	.prog = gf100_ram_prog,
 	.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 75814f15eb53c..8bcb7e79a0cb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1704,8 +1704,6 @@ gk104_ram = {
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
 	.dtor = gk104_ram_dtor,
 	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 	.calc = gk104_ram_calc,
 	.prog = gk104_ram_prog,
 	.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 3f0b56347291b..27c68e3f97721 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -39,8 +39,6 @@ gm107_ram = {
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
 	.dtor = gk104_ram_dtor,
 	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 	.calc = gk104_ram_calc,
 	.prog = gk104_ram_prog,
 	.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index fd8facf904766..6b0cac1fe7b44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -54,8 +54,6 @@ gm200_ram = {
 	.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
 	.dtor = gk104_ram_dtor,
 	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 	.calc = gk104_ram_calc,
 	.prog = gk104_ram_prog,
 	.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index df8a87333b674..adb62a6beb638 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -84,8 +84,6 @@ gp100_ram = {
 	.probe_fbp_amount = gm200_ram_probe_fbp_amount,
 	.probe_fbpa_amount = gp100_ram_probe_fbpa,
 	.init = gp100_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index f10664372161d..920b3d3478030 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -26,6 +26,7 @@
 #include "ram.h"
 #include "ramfuc.h"
 
+#include <core/memory.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0205.h>
@@ -86,7 +87,7 @@ struct gt215_ltrain {
 	u32 r_100720;
 	u32 r_1111e0;
 	u32 r_111400;
-	struct nvkm_mem *mem;
+	struct nvkm_memory *memory;
 };
 
 struct gt215_ram {
@@ -279,10 +280,10 @@ gt215_link_train_init(struct gt215_ram *ram)
 	struct gt215_ltrain *train = &ram->ltrain;
 	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_bios *bios = device->bios;
-	struct nvkm_mem *mem;
 	struct nvbios_M0205E M0205E;
 	u8 ver, hdr, cnt, len;
 	u32 r001700;
+	u64 addr;
 	int ret, i = 0;
 
 	train->state = NVA3_TRAIN_UNSUPPORTED;
@@ -297,14 +298,14 @@ gt215_link_train_init(struct gt215_ram *ram)
 
 	train->state = NVA3_TRAIN_ONCE;
 
-	ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
-				  &ram->ltrain.mem);
+	ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 16, 0x8000,
+			   true, true, &ram->ltrain.memory);
 	if (ret)
 		return ret;
 
-	mem = ram->ltrain.mem;
+	addr = nvkm_memory_addr(ram->ltrain.memory);
 
-	nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+	nvkm_wr32(device, 0x100538, 0x10000000 | (addr >> 16));
 	nvkm_wr32(device, 0x1005a8, 0x0000ffff);
 	nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
 
@@ -320,7 +321,7 @@ gt215_link_train_init(struct gt215_ram *ram)
 
 	/* And upload the pattern */
 	r001700 = nvkm_rd32(device, 0x1700);
-	nvkm_wr32(device, 0x1700, mem->offset >> 16);
+	nvkm_wr32(device, 0x1700, addr >> 16);
 	for (i = 0; i < 16; i++)
 		nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
 	for (i = 0; i < 16; i++)
@@ -336,8 +337,7 @@ gt215_link_train_init(struct gt215_ram *ram)
 static void
 gt215_link_train_fini(struct gt215_ram *ram)
 {
-	if (ram->ltrain.mem)
-		ram->base.func->put(&ram->base, &ram->ltrain.mem);
+	nvkm_memory_unref(&ram->ltrain.memory);
 }
 
 /*
@@ -931,8 +931,6 @@ static const struct nvkm_ram_func
 gt215_ram_func = {
 	.dtor = gt215_ram_dtor,
 	.init = gt215_ram_init,
-	.get = nv50_ram_get,
-	.put = nv50_ram_put,
 	.calc = gt215_ram_calc,
 	.prog = gt215_ram_prog,
 	.tidy = gt215_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 017a91de74a0d..7de18e53ef45c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -53,8 +53,6 @@ mcp77_ram_init(struct nvkm_ram *base)
 static const struct nvkm_ram_func
 mcp77_ram_func = {
 	.init = mcp77_ram_init,
-	.get = nv50_ram_get,
-	.put = nv50_ram_put,
 };
 
 int
@@ -73,7 +71,7 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	*pram = &ram->base;
 
 	ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
-			    size, 0, &ram->base);
+			    size, &ram->base);
 	if (ret)
 		return ret;
 
@@ -81,7 +79,8 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	ram->base.stolen = base;
 	nvkm_mm_fini(&ram->base.vram);
 
-	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >>
 			    NVKM_RAM_MM_SHIFT, 1);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 6f053a03d61c7..cc764a93f1a3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -61,5 +61,5 @@ nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	else
 		type = NVKM_RAM_TYPE_SDRAM;
 
-	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index dfd155c98dbb0..afe54e323b18e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -36,5 +36,5 @@ nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	else
 		type = NVKM_RAM_TYPE_SDRAM;
 
-	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index 3c6a8710e8127..4c07d10bb9760 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -44,5 +44,5 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	}
 
 	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
-			     mib * 1024 * 1024, 0, pram);
+			     mib * 1024 * 1024, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index 747e47c10cc74..71d63d7daa75b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -29,7 +29,6 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	struct nvkm_device *device = fb->subdev.device;
 	u32 pbus1218 =  nvkm_rd32(device, 0x001218);
 	u32     size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
-	u32     tags =  nvkm_rd32(device, 0x100320);
 	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
@@ -40,7 +39,7 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
 	}
 
-	ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
+	ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 70c63535d56b3..2b12e388f47aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -187,13 +187,13 @@ nv40_ram_func = {
 
 int
 nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
-	      u32 tags, struct nvkm_ram **pram)
+	      struct nvkm_ram **pram)
 {
 	struct nv40_ram *ram;
 	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
 		return -ENOMEM;
 	*pram = &ram->base;
-	return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+	return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, &ram->base);
 }
 
 int
@@ -202,7 +202,6 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	struct nvkm_device *device = fb->subdev.device;
 	u32 pbus1218 = nvkm_rd32(device, 0x001218);
 	u32     size = nvkm_rd32(device, 0x10020c) & 0xff000000;
-	u32     tags = nvkm_rd32(device, 0x100320);
 	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
@@ -213,7 +212,7 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
 	}
 
-	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	ret = nv40_ram_new_(fb, type, size, pram);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index 8a0524566b486..ec5dcbfcaea8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -9,6 +9,6 @@ struct nv40_ram {
 	u32 coef;
 };
 
-int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64,
 		  struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 114828be292ef..d3fea3726461d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -28,7 +28,6 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nvkm_device *device = fb->subdev.device;
 	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
-	u32  tags = nvkm_rd32(device, 0x100320);
 	u32 fb474 = nvkm_rd32(device, 0x100474);
 	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
@@ -40,7 +39,7 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	if (fb474 & 0x00000001)
 		type = NVKM_RAM_TYPE_DDR1;
 
-	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	ret = nv40_ram_new_(fb, type, size, pram);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index bc56fbf1c788c..ab2630e5e6fbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -38,5 +38,5 @@ nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	if (fb474 & 0x00000001)
 		type = NVKM_RAM_TYPE_DDR1;
 
-	return nv40_ram_new_(fb, type, size, 0, pram);
+	return nv40_ram_new_(fb, type, size, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index c01f4b1022b87..946ca7c2e0b69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -28,7 +28,6 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nvkm_device *device = fb->subdev.device;
 	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
-	u32  tags = nvkm_rd32(device, 0x100320);
 	u32 fb914 = nvkm_rd32(device, 0x100914);
 	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
@@ -40,7 +39,7 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	case 0x00000003: break;
 	}
 
-	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	ret = nv40_ram_new_(fb, type, size, pram);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index fa3c2e06203dc..02b8bdbc819f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -29,5 +29,5 @@ nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	struct nvkm_device *device = fb->subdev.device;
 	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
 	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
-			     size, 0, pram);
+			     size, pram);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 6549b05883095..2ccb4b6be1538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -493,100 +493,8 @@ nv50_ram_tidy(struct nvkm_ram *base)
 	ram_exec(&ram->hwsq, false);
 }
 
-void
-__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
-{
-	struct nvkm_mm_node *next = mem->mem;
-	struct nvkm_mm_node *node;
-	while ((node = next)) {
-		next = node->next;
-		nvkm_mm_free(&ram->vram, &node);
-	}
-	nvkm_mm_free(&ram->tags, &mem->tag);
-}
-
-void
-nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
-	struct nvkm_mem *mem = *pmem;
-
-	*pmem = NULL;
-	if (unlikely(mem == NULL))
-		return;
-
-	mutex_lock(&ram->fb->subdev.mutex);
-	__nv50_ram_put(ram, mem);
-	mutex_unlock(&ram->fb->subdev.mutex);
-
-	kfree(mem);
-}
-
-int
-nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
-	     u32 memtype, struct nvkm_mem **pmem)
-{
-	struct nvkm_mm *heap = &ram->vram;
-	struct nvkm_mm *tags = &ram->tags;
-	struct nvkm_mm_node **node, *r;
-	struct nvkm_mem *mem;
-	int comp = (memtype & 0x300) >> 8;
-	int type = (memtype & 0x07f);
-	int back = (memtype & 0x800);
-	int min, max, ret;
-
-	max = (size >> NVKM_RAM_MM_SHIFT);
-	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
-	align >>= NVKM_RAM_MM_SHIFT;
-
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	mutex_lock(&ram->fb->subdev.mutex);
-	if (comp) {
-		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
-			int n = (max >> 4) * comp;
-
-			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
-			if (ret)
-				mem->tag = NULL;
-		}
-
-		if (unlikely(!mem->tag))
-			comp = 0;
-	}
-
-	mem->memtype = (comp << 7) | type;
-	mem->size = max;
-
-	type = nv50_fb_memtype[type];
-	node = &mem->mem;
-	do {
-		if (back)
-			ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
-		else
-			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
-		if (ret) {
-			mutex_unlock(&ram->fb->subdev.mutex);
-			ram->func->put(ram, &mem);
-			return ret;
-		}
-
-		*node = r;
-		node = &r->next;
-		max -= r->length;
-	} while (max);
-	mutex_unlock(&ram->fb->subdev.mutex);
-
-	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
-	*pmem = mem;
-	return 0;
-}
-
 static const struct nvkm_ram_func
 nv50_ram_func = {
-	.get = nv50_ram_get,
-	.put = nv50_ram_put,
 	.calc = nv50_ram_calc,
 	.prog = nv50_ram_prog,
 	.tidy = nv50_ram_tidy,
@@ -639,7 +547,6 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
 	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
 	u64 size = nvkm_rd32(device, 0x10020c);
-	u32 tags = nvkm_rd32(device, 0x100320);
 	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
@@ -660,7 +567,7 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
 
 	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
 
-	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
+	ret = nvkm_ram_ctor(func, fb, type, size, ram);
 	if (ret)
 		return ret;
 
@@ -669,7 +576,8 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
 	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
 	nvkm_mm_fini(&ram->vram);
 
-	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
 			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 10c987a654ec5..364ea4492acc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,181 +23,90 @@
  */
 #include "priv.h"
 
-#include <core/memory.h>
 #include <subdev/bar.h>
 
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
-#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
-
-struct nvkm_instobj {
-	struct nvkm_memory memory;
-	struct nvkm_memory *parent;
-	struct nvkm_instmem *imem;
-	struct list_head head;
-	u32 *suspend;
-	void __iomem *map;
-};
-
-static enum nvkm_memory_target
-nvkm_instobj_target(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_target(memory);
-}
-
-static u64
-nvkm_instobj_addr(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_addr(memory);
-}
-
-static u64
-nvkm_instobj_size(struct nvkm_memory *memory)
-{
-	memory = nvkm_instobj(memory)->parent;
-	return nvkm_memory_size(memory);
-}
-
 static void
-nvkm_instobj_release(struct nvkm_memory *memory)
+nvkm_instobj_load(struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	nvkm_bar_flush(iobj->imem->subdev.device->bar);
-}
-
-static void __iomem *
-nvkm_instobj_acquire(struct nvkm_memory *memory)
-{
-	return nvkm_instobj(memory)->map;
-}
-
-static u32
-nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
-	return ioread32_native(nvkm_instobj(memory)->map + offset);
-}
-
-static void
-nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
-{
-	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
-}
+	struct nvkm_memory *memory = &iobj->memory;
+	const u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int i;
 
-static void
-nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
-{
-	memory = nvkm_instobj(memory)->parent;
-	nvkm_memory_map(memory, vma, offset);
-}
+	if (!(map = nvkm_kmap(memory))) {
+		for (i = 0; i < size; i += 4)
+			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+	} else {
+		memcpy_toio(map, iobj->suspend, size);
+	}
+	nvkm_done(memory);
 
-static void *
-nvkm_instobj_dtor(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	spin_lock(&iobj->imem->lock);
-	list_del(&iobj->head);
-	spin_unlock(&iobj->imem->lock);
-	nvkm_memory_del(&iobj->parent);
-	return iobj;
+	kvfree(iobj->suspend);
+	iobj->suspend = NULL;
 }
 
-static const struct nvkm_memory_func
-nvkm_instobj_func = {
-	.dtor = nvkm_instobj_dtor,
-	.target = nvkm_instobj_target,
-	.addr = nvkm_instobj_addr,
-	.size = nvkm_instobj_size,
-	.acquire = nvkm_instobj_acquire,
-	.release = nvkm_instobj_release,
-	.rd32 = nvkm_instobj_rd32,
-	.wr32 = nvkm_instobj_wr32,
-	.map = nvkm_instobj_map,
-};
-
-static void
-nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+static int
+nvkm_instobj_save(struct nvkm_instobj *iobj)
 {
-	memory = nvkm_instobj(memory)->parent;
-	nvkm_memory_boot(memory, vm);
-}
+	struct nvkm_memory *memory = &iobj->memory;
+	const u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int i;
 
-static void
-nvkm_instobj_release_slow(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	nvkm_instobj_release(memory);
-	nvkm_done(iobj->parent);
-}
+	iobj->suspend = kvmalloc(size, GFP_KERNEL);
+	if (!iobj->suspend)
+		return -ENOMEM;
 
-static void __iomem *
-nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
-{
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	iobj->map = nvkm_kmap(iobj->parent);
-	if (iobj->map)
-		memory->func = &nvkm_instobj_func;
-	return iobj->map;
+	if (!(map = nvkm_kmap(memory))) {
+		for (i = 0; i < size; i += 4)
+			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+	} else {
+		memcpy_fromio(iobj->suspend, map, size);
+	}
+	nvkm_done(memory);
+	return 0;
 }
 
-static u32
-nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+void
+nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	return nvkm_ro32(iobj->parent, offset);
+	spin_lock(&imem->lock);
+	list_del(&iobj->head);
+	spin_unlock(&imem->lock);
 }
 
-static void
-nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+void
+nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
 {
-	struct nvkm_instobj *iobj = nvkm_instobj(memory);
-	return nvkm_wo32(iobj->parent, offset, data);
+	nvkm_memory_ctor(func, &iobj->memory);
+	iobj->suspend = NULL;
+	spin_lock(&imem->lock);
+	list_add_tail(&iobj->head, &imem->list);
+	spin_unlock(&imem->lock);
 }
 
-static const struct nvkm_memory_func
-nvkm_instobj_func_slow = {
-	.dtor = nvkm_instobj_dtor,
-	.target = nvkm_instobj_target,
-	.addr = nvkm_instobj_addr,
-	.size = nvkm_instobj_size,
-	.boot = nvkm_instobj_boot,
-	.acquire = nvkm_instobj_acquire_slow,
-	.release = nvkm_instobj_release_slow,
-	.rd32 = nvkm_instobj_rd32_slow,
-	.wr32 = nvkm_instobj_wr32_slow,
-	.map = nvkm_instobj_map,
-};
-
 int
 nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 		 struct nvkm_memory **pmemory)
 {
+	struct nvkm_subdev *subdev = &imem->subdev;
 	struct nvkm_memory *memory = NULL;
-	struct nvkm_instobj *iobj;
 	u32 offset;
 	int ret;
 
 	ret = imem->func->memory_new(imem, size, align, zero, &memory);
-	if (ret)
+	if (ret) {
+		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
 		goto done;
-
-	if (!imem->func->persistent) {
-		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
-		iobj->parent = memory;
-		iobj->imem = imem;
-		spin_lock(&iobj->imem->lock);
-		list_add_tail(&iobj->head, &imem->list);
-		spin_unlock(&iobj->imem->lock);
-		memory = &iobj->memory;
 	}
 
+	nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
+		   zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+
 	if (!imem->func->zero && zero) {
 		void __iomem *map = nvkm_kmap(memory);
 		if (unlikely(!map)) {
@@ -211,7 +120,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 
 done:
 	if (ret)
-		nvkm_memory_del(&memory);
+		nvkm_memory_unref(&memory);
 	*pmemory = memory;
 	return ret;
 }
@@ -232,39 +141,46 @@ nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 	return imem->func->wr32(imem, addr, data);
 }
 
+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+	/* Separate bootstrapped objects from the normal list, as we need
+	 * to make sure they're accessed with the slowpath on suspend
+	 * and resume.
+	 */
+	struct nvkm_instobj *iobj, *itmp;
+	spin_lock(&imem->lock);
+	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+		list_move_tail(&iobj->head, &imem->boot);
+	}
+	spin_unlock(&imem->lock);
+}
+
 static int
 nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i;
-
-	if (imem->func->fini)
-		imem->func->fini(imem);
 
 	if (suspend) {
 		list_for_each_entry(iobj, &imem->list, head) {
-			struct nvkm_memory *memory = iobj->parent;
-			u64 size = nvkm_memory_size(memory);
+			int ret = nvkm_instobj_save(iobj);
+			if (ret)
+				return ret;
+		}
 
-			iobj->suspend = vmalloc(size);
-			if (!iobj->suspend)
-				return -ENOMEM;
+		nvkm_bar_bar2_fini(subdev->device);
 
-			for (i = 0; i < size; i += 4)
-				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+		list_for_each_entry(iobj, &imem->boot, head) {
+			int ret = nvkm_instobj_save(iobj);
+			if (ret)
+				return ret;
 		}
 	}
 
-	return 0;
-}
+	if (imem->func->fini)
+		imem->func->fini(imem);
 
-static int
-nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(subdev);
-	if (imem->func->oneinit)
-		return imem->func->oneinit(imem);
 	return 0;
 }
 
@@ -273,22 +189,31 @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i;
+
+	list_for_each_entry(iobj, &imem->boot, head) {
+		if (iobj->suspend)
+			nvkm_instobj_load(iobj);
+	}
+
+	nvkm_bar_bar2_init(subdev->device);
 
 	list_for_each_entry(iobj, &imem->list, head) {
-		if (iobj->suspend) {
-			struct nvkm_memory *memory = iobj->parent;
-			u64 size = nvkm_memory_size(memory);
-			for (i = 0; i < size; i += 4)
-				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
-			vfree(iobj->suspend);
-			iobj->suspend = NULL;
-		}
+		if (iobj->suspend)
+			nvkm_instobj_load(iobj);
 	}
 
 	return 0;
 }
 
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->oneinit)
+		return imem->func->oneinit(imem);
+	return 0;
+}
+
 static void *
 nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
@@ -315,4 +240,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
 	imem->func = func;
 	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
+	INIT_LIST_HEAD(&imem->boot);
 }
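
The save/load pair introduced above shadows each instance object into host memory across suspend, using a bulk copy through a CPU mapping when one exists and falling back to word-at-a-time accessors otherwise. A minimal userspace sketch of that dual-path idea (all names here are illustrative stand-ins, not the kernel API):

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for an instance object: an optional direct
 * CPU mapping plus slow word accessors, mirroring the fast/slow split
 * in nvkm_instobj_save()/load().  Not the kernel API.
 */
struct obj {
	uint32_t *map;     /* direct mapping, may be NULL */
	uint32_t *backing; /* slow-path storage */
	size_t size;       /* bytes, multiple of 4 */
	uint32_t *suspend; /* shadow copy, allocated on save */
};

static uint32_t obj_ro32(struct obj *o, size_t off) { return o->backing[off / 4]; }
static void obj_wo32(struct obj *o, size_t off, uint32_t d) { o->backing[off / 4] = d; }

static int obj_save(struct obj *o)
{
	size_t i;
	if (!(o->suspend = malloc(o->size)))
		return -1;
	if (o->map) {			/* fast path: bulk copy */
		memcpy(o->suspend, o->map, o->size);
	} else {			/* slow path: word at a time */
		for (i = 0; i < o->size; i += 4)
			o->suspend[i / 4] = obj_ro32(o, i);
	}
	return 0;
}

static void obj_load(struct obj *o)
{
	size_t i;
	if (o->map) {
		memcpy(o->map, o->suspend, o->size);
	} else {
		for (i = 0; i < o->size; i += 4)
			obj_wo32(o, i, o->suspend[i / 4]);
	}
	free(o->suspend);
	o->suspend = NULL;
}
```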
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index cd5adbec5e574..985f2990ab0dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -44,14 +44,13 @@
 #include "priv.h"
 
 #include <core/memory.h>
-#include <core/mm.h>
 #include <core/tegra.h>
-#include <subdev/fb.h>
 #include <subdev/ltc.h>
+#include <subdev/mmu.h>
 
 struct gk20a_instobj {
 	struct nvkm_memory memory;
-	struct nvkm_mem mem;
+	struct nvkm_mm_node *mn;
 	struct gk20a_instmem *imem;
 
 	/* CPU mapping */
@@ -119,16 +118,22 @@ gk20a_instobj_target(struct nvkm_memory *memory)
 	return NVKM_MEM_TARGET_NCOH;
 }
 
+static u8
+gk20a_instobj_page(struct nvkm_memory *memory)
+{
+	return 12;
+}
+
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
-	return gk20a_instobj(memory)->mem.offset;
+	return (u64)gk20a_instobj(memory)->mn->offset << 12;
 }
 
 static u64
 gk20a_instobj_size(struct nvkm_memory *memory)
 {
-	return (u64)gk20a_instobj(memory)->mem.size << 12;
+	return (u64)gk20a_instobj(memory)->mn->length << 12;
 }
 
 /*
@@ -272,12 +277,18 @@ gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 	node->vaddr[offset / 4] = data;
 }
 
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		  struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct nvkm_vmm_map map = {
+		.memory = &node->memory,
+		.offset = offset,
+		.mem = node->mn,
+	};
 
-	nvkm_vm_map_at(vma, offset, &node->mem);
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 static void *
@@ -290,8 +301,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 	if (unlikely(!node->base.vaddr))
 		goto out;
 
-	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-		       node->handle, imem->attrs);
+	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+		       node->base.vaddr, node->handle, imem->attrs);
 
 out:
 	return node;
@@ -303,7 +314,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r = node->base.mem.mem;
+	struct nvkm_mm_node *r = node->base.mn;
 	int i;
 
 	if (unlikely(!r))
@@ -321,7 +332,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < node->base.mem.size; i++) {
+	for (i = 0; i < node->base.mn->length; i++) {
 		iommu_unmap(imem->domain,
 			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -342,12 +353,11 @@ static const struct nvkm_memory_func
 gk20a_instobj_func_dma = {
 	.dtor = gk20a_instobj_dtor_dma,
 	.target = gk20a_instobj_target,
+	.page = gk20a_instobj_page,
 	.addr = gk20a_instobj_addr,
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_dma,
 	.release = gk20a_instobj_release_dma,
-	.rd32 = gk20a_instobj_rd32,
-	.wr32 = gk20a_instobj_wr32,
 	.map = gk20a_instobj_map,
 };
 
@@ -355,13 +365,18 @@ static const struct nvkm_memory_func
 gk20a_instobj_func_iommu = {
 	.dtor = gk20a_instobj_dtor_iommu,
 	.target = gk20a_instobj_target,
+	.page = gk20a_instobj_page,
 	.addr = gk20a_instobj_addr,
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_iommu,
 	.release = gk20a_instobj_release_iommu,
+	.map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+gk20a_instobj_ptrs = {
 	.rd32 = gk20a_instobj_rd32,
 	.wr32 = gk20a_instobj_wr32,
-	.map = gk20a_instobj_map,
 };
 
 static int
@@ -377,6 +392,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	*_node = &node->base;
 
 	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					   &node->handle, GFP_KERNEL,
@@ -397,8 +413,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base.mem.offset = node->handle;
-	node->base.mem.mem = &node->r;
+	node->base.mn = &node->r;
 	return 0;
 }
 
@@ -424,6 +439,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->dma_addrs = (void *)(node->pages + npages);
 
 	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
@@ -474,8 +490,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
-	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-	node->base.mem.mem = r;
+	node->base.mn = r;
 	return 0;
 
 release_area:
@@ -523,13 +538,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 
 	node->imem = imem;
 
-	/* present memory for being mapped using small pages */
-	node->mem.size = size >> 12;
-	node->mem.memtype = 0;
-	node->mem.page_shift = 12;
-
 	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		   size, align, node->mem.offset);
+		   size, align, (u64)node->mn->offset << 12);
 
 	return 0;
 }
@@ -554,7 +564,6 @@ static const struct nvkm_instmem_func
 gk20a_instmem = {
 	.dtor = gk20a_instmem_dtor,
 	.memory_new = gk20a_instobj_new,
-	.persistent = true,
 	.zero = false,
 };
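
The gk20a conversion above is the first user of the new split between nvkm_memory_func (lifetime callbacks) and nvkm_memory_ptrs (rd32/wr32 accessors attached separately, and swappable at runtime). A standalone sketch of that split, with hypothetical names:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the nvkm_memory_func/nvkm_memory_ptrs split:
 * lifetime callbacks stay fixed, accessors live in a separate table
 * that can be attached or swapped after construction.
 */
struct memory;

struct memory_func {			/* fixed for the object's lifetime */
	void (*dtor)(struct memory *);
};

struct memory_ptrs {			/* swappable accessors */
	uint32_t (*rd32)(struct memory *, uint64_t);
	void (*wr32)(struct memory *, uint64_t, uint32_t);
};

struct memory {
	const struct memory_func *func;
	const struct memory_ptrs *ptrs;	/* changed when a mapping (dis)appears */
	uint32_t store[64];
};

static uint32_t slow_rd32(struct memory *m, uint64_t off) { return m->store[off / 4]; }
static void slow_wr32(struct memory *m, uint64_t off, uint32_t d) { m->store[off / 4] = d; }
static const struct memory_ptrs slow = { slow_rd32, slow_wr32 };

int main(void)
{
	struct memory m = { .ptrs = &slow };
	m.ptrs->wr32(&m, 8, 0xdeadbeef);
	printf("%08x\n", (unsigned)m.ptrs->rd32(&m, 8));
	return 0;
}
```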
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6133c8bb2d423..6bf0dad469195 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -24,7 +24,6 @@
 #define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
 #include "priv.h"
 
-#include <core/memory.h>
 #include <core/ramht.h>
 
 struct nv04_instmem {
@@ -35,30 +34,39 @@ struct nv04_instmem {
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
-#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
 
 struct nv04_instobj {
-	struct nvkm_memory memory;
+	struct nvkm_instobj base;
 	struct nv04_instmem *imem;
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv04_instobj_target(struct nvkm_memory *memory)
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
-static u64
-nv04_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv04_instobj(memory)->node->offset;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
 }
 
-static u64
-nv04_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv04_instobj_ptrs = {
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
 {
-	return nv04_instobj(memory)->node->length;
 }
 
 static void __iomem *
@@ -69,25 +77,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
 	return device->pri + 0x700000 + iobj->node->offset;
 }
 
-static void
-nv04_instobj_release(struct nvkm_memory *memory)
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
 {
+	return nv04_instobj(memory)->node->length;
 }
 
-static u32
-nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+	return nv04_instobj(memory)->node->offset;
 }
 
-static void
-nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -97,6 +102,7 @@ nv04_instobj_dtor(struct nvkm_memory *memory)
 	mutex_lock(&iobj->imem->base.subdev.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
 	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
 
@@ -108,8 +114,6 @@ nv04_instobj_func = {
 	.addr = nv04_instobj_addr,
 	.acquire = nv04_instobj_acquire,
 	.release = nv04_instobj_release,
-	.rd32 = nv04_instobj_rd32,
-	.wr32 = nv04_instobj_wr32,
 };
 
 static int
@@ -122,9 +126,10 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 
 	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
 		return -ENOMEM;
-	*pmemory = &iobj->memory;
+	*pmemory = &iobj->base.memory;
 
-	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base);
+	iobj->base.memory.ptrs = &nv04_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
@@ -160,7 +165,7 @@ nv04_instmem_oneinit(struct nvkm_instmem *base)
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
 	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
@@ -194,10 +199,10 @@ static void *
 nv04_instmem_dtor(struct nvkm_instmem *base)
 {
 	struct nv04_instmem *imem = nv04_instmem(base);
-	nvkm_memory_del(&imem->base.ramfc);
-	nvkm_memory_del(&imem->base.ramro);
+	nvkm_memory_unref(&imem->base.ramfc);
+	nvkm_memory_unref(&imem->base.ramro);
 	nvkm_ramht_del(&imem->base.ramht);
-	nvkm_memory_del(&imem->base.vbios);
+	nvkm_memory_unref(&imem->base.vbios);
 	nvkm_mm_fini(&imem->heap);
 	return imem;
 }
@@ -209,7 +214,6 @@ nv04_instmem = {
 	.rd32 = nv04_instmem_rd32,
 	.wr32 = nv04_instmem_wr32,
 	.memory_new = nv04_instobj_new,
-	.persistent = false,
 	.zero = false,
 };
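
nv04 accesses instance memory through a fixed PRAMIN aperture: every access is window base (0x700000) plus the object's heap offset plus the caller's offset. A hedged standalone model of that arithmetic (mmio and the reg_* helpers are stand-ins, not real APIs):

```c
#include <stdint.h>

/* Standalone model of fixed-aperture PRAMIN access: the instance heap
 * is visible at a constant MMIO offset, so every access is window base
 * plus object offset plus caller offset.
 */
#define PRAMIN_BASE 0x700000u

static uint32_t mmio[0x800000 / 4];	/* stand-in for the register BAR */

static uint32_t reg_rd32(uint32_t addr)		{ return mmio[addr / 4]; }
static void reg_wr32(uint32_t addr, uint32_t d)	{ mmio[addr / 4] = d; }

struct iobj { uint32_t offset; };	/* allocation offset inside the heap */

static uint32_t iobj_rd32(struct iobj *o, uint32_t off)
{
	return reg_rd32(PRAMIN_BASE + o->offset + off);
}

static void iobj_wr32(struct iobj *o, uint32_t off, uint32_t d)
{
	reg_wr32(PRAMIN_BASE + o->offset + off, d);
}
```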
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index c0543875e4904..086c118488ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -24,7 +24,6 @@
 #define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
 #include "priv.h"
 
-#include <core/memory.h>
 #include <core/ramht.h>
 #include <engine/gr/nv40.h>
 
@@ -37,30 +36,38 @@ struct nv40_instmem {
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
-#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
 
 struct nv40_instobj {
-	struct nvkm_memory memory;
+	struct nvkm_instobj base;
 	struct nv40_instmem *imem;
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv40_instobj_target(struct nvkm_memory *memory)
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv40_instobj(memory)->node->offset;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv40_instobj_ptrs = {
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
 {
-	return nv40_instobj(memory)->node->length;
+	wmb();
 }
 
 static void __iomem *
@@ -70,23 +77,22 @@ nv40_instobj_acquire(struct nvkm_memory *memory)
 	return iobj->imem->iomem + iobj->node->offset;
 }
 
-static void
-nv40_instobj_release(struct nvkm_memory *memory)
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
 {
+	return nv40_instobj(memory)->node->length;
 }
 
-static u32
-nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+	return nv40_instobj(memory)->node->offset;
 }
 
-static void
-nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -96,6 +102,7 @@ nv40_instobj_dtor(struct nvkm_memory *memory)
 	mutex_lock(&iobj->imem->base.subdev.mutex);
 	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
 	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
 	return iobj;
 }
 
@@ -107,8 +114,6 @@ nv40_instobj_func = {
 	.addr = nv40_instobj_addr,
 	.acquire = nv40_instobj_acquire,
 	.release = nv40_instobj_release,
-	.rd32 = nv40_instobj_rd32,
-	.wr32 = nv40_instobj_wr32,
 };
 
 static int
@@ -121,9 +126,10 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 
 	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
 		return -ENOMEM;
-	*pmemory = &iobj->memory;
+	*pmemory = &iobj->base.memory;
 
-	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
+	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
@@ -171,7 +177,7 @@ nv40_instmem_oneinit(struct nvkm_instmem *base)
 	imem->base.reserved += 512 * 1024;	/* object storage */
 	imem->base.reserved = round_up(imem->base.reserved, 4096);
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
@@ -209,10 +215,10 @@ static void *
 nv40_instmem_dtor(struct nvkm_instmem *base)
 {
 	struct nv40_instmem *imem = nv40_instmem(base);
-	nvkm_memory_del(&imem->base.ramfc);
-	nvkm_memory_del(&imem->base.ramro);
+	nvkm_memory_unref(&imem->base.ramfc);
+	nvkm_memory_unref(&imem->base.ramro);
 	nvkm_ramht_del(&imem->base.ramht);
-	nvkm_memory_del(&imem->base.vbios);
+	nvkm_memory_unref(&imem->base.vbios);
 	nvkm_mm_fini(&imem->heap);
 	if (imem->iomem)
 		iounmap(imem->iomem);
@@ -226,7 +232,6 @@ nv40_instmem = {
 	.rd32 = nv40_instmem_rd32,
 	.wr32 = nv40_instmem_wr32,
 	.memory_new = nv40_instobj_new,
-	.persistent = false,
 	.zero = false,
 };
 
@@ -248,8 +253,8 @@ nv40_instmem_new(struct nvkm_device *device, int index,
 	else
 		bar = 3;
 
-	imem->iomem = ioremap(device->func->resource_addr(device, bar),
-			      device->func->resource_size(device, bar));
+	imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
+				 device->func->resource_size(device, bar));
 	if (!imem->iomem) {
 		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
 		return -EFAULT;
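
nv40 now maps PRAMIN with ioremap_wc() and pairs it with the wmb() added to nv40_instobj_release(), so write-combined stores are flushed before the object is handed off. A userspace analogue of that release-barrier pattern, assuming C11 atomics:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Userspace analogue of the release-barrier pattern: buffered
 * (write-combined) stores must be ordered before the hand-off point.
 * The kernel side uses wmb(); this sketch uses a C11 release fence.
 */
static void wc_wr32(volatile uint32_t *map, uint64_t off, uint32_t data)
{
	map[off / 4] = data;	/* may linger in a write-combine buffer */
}

static void wc_release(void)
{
	/* Order all prior stores before whatever observes the object
	 * next; the analogue of the wmb() in nv40_instobj_release().
	 */
	atomic_thread_fence(memory_order_release);
}
```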
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 6d512c062ae32..1ba7289684aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -31,147 +31,293 @@
 
 struct nv50_instmem {
 	struct nvkm_instmem base;
-	unsigned long lock_flags;
-	spinlock_t lock;
 	u64 addr;
+
+	/* Mappings that can be evicted when BAR2 space has been exhausted. */
+	struct list_head lru;
 };
 
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
-#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
 
 struct nv50_instobj {
-	struct nvkm_memory memory;
+	struct nvkm_instobj base;
 	struct nv50_instmem *imem;
-	struct nvkm_mem *mem;
-	struct nvkm_vma bar;
+	struct nvkm_memory *ram;
+	struct nvkm_vma *bar;
+	refcount_t maps;
 	void *map;
+	struct list_head lru;
 };
 
-static enum nvkm_memory_target
-nv50_instobj_target(struct nvkm_memory *memory)
+static void
+nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_VRAM;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+	u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imem->base.lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	nvkm_wr32(device, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&imem->base.lock, flags);
 }
 
-static u64
-nv50_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
 {
-	return nv50_instobj(memory)->mem->offset;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+	u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+	u32 data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imem->base.lock, flags);
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	data = nvkm_rd32(device, 0x700000 + addr);
+	spin_unlock_irqrestore(&imem->base.lock, flags);
+	return data;
 }
 
-static u64
-nv50_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv50_instobj_slow = {
+	.rd32 = nv50_instobj_rd32_slow,
+	.wr32 = nv50_instobj_wr32_slow,
+};
+
+static void
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+	iowrite32_native(data, nv50_instobj(memory)->map + offset);
 }
 
+static u32
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	return ioread32_native(nv50_instobj(memory)->map + offset);
+}
+
+static const struct nvkm_memory_ptrs
+nv50_instobj_fast = {
+	.rd32 = nv50_instobj_rd32,
+	.wr32 = nv50_instobj_wr32,
+};
+
 static void
-nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nv50_instmem *imem = iobj->imem;
+	struct nv50_instobj *eobj;
+	struct nvkm_memory *memory = &iobj->base.memory;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_vma *bar = NULL, *ebar;
 	u64 size = nvkm_memory_size(memory);
-	void __iomem *map;
+	void *emap;
 	int ret;
 
-	iobj->map = ERR_PTR(-ENOMEM);
-
-	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
-	if (ret == 0) {
-		map = ioremap(device->func->resource_addr(device, 3) +
-			      (u32)iobj->bar.offset, size);
-		if (map) {
-			nvkm_memory_map(memory, &iobj->bar, 0);
-			iobj->map = map;
-		} else {
-			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-			nvkm_vm_put(&iobj->bar);
+	/* Attempt to allocate BAR2 address-space and map the object
+	 * into it.  The lock has to be dropped while doing this due
+	 * to the possibility of recursion for page table allocation.
+	 */
+	mutex_unlock(&subdev->mutex);
+	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
+		/* Evict unused mappings, and keep retrying until we either
+		 * succeed, or there are no more objects left on the LRU.
+		 */
+		mutex_lock(&subdev->mutex);
+		eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
+		if (eobj) {
+			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
+				   nvkm_memory_addr(&eobj->base.memory),
+				   nvkm_memory_size(&eobj->base.memory),
+				   eobj->bar->addr);
+			list_del_init(&eobj->lru);
+			ebar = eobj->bar;
+			eobj->bar = NULL;
+			emap = eobj->map;
+			eobj->map = NULL;
 		}
-	} else {
-		nvkm_warn(subdev, "PRAMIN exhausted\n");
+		mutex_unlock(&subdev->mutex);
+		if (!eobj)
+			break;
+		iounmap(emap);
+		nvkm_vmm_put(vmm, &ebar);
 	}
+
+	if (ret == 0)
+		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
+	mutex_lock(&subdev->mutex);
+	if (ret || iobj->bar) {
+		/* We either failed, or another thread beat us. */
+		mutex_unlock(&subdev->mutex);
+		nvkm_vmm_put(vmm, &bar);
+		mutex_lock(&subdev->mutex);
+		return;
+	}
+
+	/* Make the mapping visible to the host. */
+	iobj->bar = bar;
+	iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+			       (u32)iobj->bar->addr, size);
+	if (!iobj->map) {
+		nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+		nvkm_vmm_put(vmm, &iobj->bar);
+	}
+}
+
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		 struct nvkm_vma *vma, void *argv, u32 argc)
+{
+	memory = nv50_instobj(memory)->ram;
+	return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
 }
 
 static void
 nv50_instobj_release(struct nvkm_memory *memory)
 {
-	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
-	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
+
+	wmb();
+	nvkm_bar_flush(subdev->device->bar);
+
+	if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+		/* Add the now-unused mapping to the LRU instead of directly
+		 * unmapping it here, in case we need to map it again later.
+		 */
+		if (likely(iobj->lru.next) && iobj->map) {
+			BUG_ON(!list_empty(&iobj->lru));
+			list_add_tail(&iobj->lru, &imem->lru);
+		}
+
+		/* Switch back to NULL accessors when last map is gone. */
+		iobj->base.memory.ptrs = NULL;
+		mutex_unlock(&subdev->mutex);
+	}
 }
 
 static void __iomem *
 nv50_instobj_acquire(struct nvkm_memory *memory)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_bar *bar = imem->base.subdev.device->bar;
-	struct nvkm_vm *vm;
-	unsigned long flags;
+	struct nvkm_instmem *imem = &iobj->imem->base;
+	struct nvkm_vmm *vmm;
+	void __iomem *map = NULL;
 
-	if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
-		nvkm_memory_boot(memory, vm);
-	if (!IS_ERR_OR_NULL(iobj->map))
+	/* Already mapped? */
+	if (refcount_inc_not_zero(&iobj->maps))
 		return iobj->map;
 
-	spin_lock_irqsave(&imem->lock, flags);
-	imem->lock_flags = flags;
-	return NULL;
-}
+	/* Take the lock, and re-check that another thread hasn't
+	 * already mapped the object in the meantime.
+	 */
+	mutex_lock(&imem->subdev.mutex);
+	if (refcount_inc_not_zero(&iobj->maps)) {
+		mutex_unlock(&imem->subdev.mutex);
+		return iobj->map;
+	}
 
-static u32
-nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-	u32 data;
+	/* Attempt to get a direct CPU mapping of the object. */
+	if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
+		if (!iobj->map)
+			nv50_instobj_kmap(iobj, vmm);
+		map = iobj->map;
+	}
 
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
+	if (!refcount_inc_not_zero(&iobj->maps)) {
+		/* Exclude object from eviction while it's being accessed. */
+		if (likely(iobj->lru.next))
+			list_del_init(&iobj->lru);
+
+		if (map)
+			iobj->base.memory.ptrs = &nv50_instobj_fast;
+		else
+			iobj->base.memory.ptrs = &nv50_instobj_slow;
+		refcount_inc(&iobj->maps);
 	}
-	data = nvkm_rd32(device, 0x700000 + addr);
-	return data;
+
+	mutex_unlock(&imem->subdev.mutex);
+	return map;
 }
 
 static void
-nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
+	struct nvkm_instmem *imem = &iobj->imem->base;
+
+	/* Exclude bootstrapped objects (ie. the page tables for the
+	 * instmem BAR itself) from eviction.
+	 */
+	mutex_lock(&imem->subdev.mutex);
+	if (likely(iobj->lru.next)) {
+		list_del_init(&iobj->lru);
+		iobj->lru.next = NULL;
 	}
-	nvkm_wr32(device, 0x700000 + addr, data);
+
+	nv50_instobj_kmap(iobj, vmm);
+	nvkm_instmem_boot(imem);
+	mutex_unlock(&imem->subdev.mutex);
 }
 
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	nvkm_vm_map_at(vma, offset, iobj->mem);
+	return nvkm_memory_size(nv50_instobj(memory)->ram);
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+	return nvkm_memory_addr(nv50_instobj(memory)->ram);
+}
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+	return nvkm_memory_target(nv50_instobj(memory)->ram);
 }
 
 static void *
 nv50_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
-	if (!IS_ERR_OR_NULL(iobj->map)) {
-		nvkm_vm_put(&iobj->bar);
-		iounmap(iobj->map);
+	struct nvkm_instmem *imem = &iobj->imem->base;
+	struct nvkm_vma *bar;
+	void *map = NULL;
+
+	mutex_lock(&imem->subdev.mutex);
+	if (likely(iobj->lru.next))
+		list_del(&iobj->lru);
+	map = iobj->map;
+	bar = iobj->bar;
+	mutex_unlock(&imem->subdev.mutex);
+
+	if (map) {
+		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
+		iounmap(map);
+		if (likely(vmm)) /* Can be NULL during BAR destructor. */
+			nvkm_vmm_put(vmm, &bar);
 	}
-	ram->func->put(ram, &iobj->mem);
+
+	nvkm_memory_unref(&iobj->ram);
+	nvkm_instobj_dtor(imem, &iobj->base);
 	return iobj;
 }
 
@@ -184,8 +330,6 @@ nv50_instobj_func = {
 	.boot = nv50_instobj_boot,
 	.acquire = nv50_instobj_acquire,
 	.release = nv50_instobj_release,
-	.rd32 = nv50_instobj_rd32,
-	.wr32 = nv50_instobj_wr32,
 	.map = nv50_instobj_map,
 };
 
@@ -195,25 +339,19 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 {
 	struct nv50_instmem *imem = nv50_instmem(base);
 	struct nv50_instobj *iobj;
-	struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
-	int ret;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u8 page = max(order_base_2(align), 12);
 
 	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
 		return -ENOMEM;
-	*pmemory = &iobj->memory;
+	*pmemory = &iobj->base.memory;
 
-	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
 	iobj->imem = imem;
+	refcount_set(&iobj->maps, 0);
+	INIT_LIST_HEAD(&iobj->lru);
 
-	size  = max((size  + 4095) & ~4095, (u32)4096);
-	align = max((align + 4095) & ~4095, (u32)4096);
-
-	ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
-	if (ret)
-		return ret;
-
-	iobj->mem->page_shift = 12;
-	return 0;
+	return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
 }
 
 /******************************************************************************
@@ -230,7 +368,6 @@ static const struct nvkm_instmem_func
 nv50_instmem = {
 	.fini = nv50_instmem_fini,
 	.memory_new = nv50_instobj_new,
-	.persistent = false,
 	.zero = false,
 };
 
@@ -243,7 +380,7 @@ nv50_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	INIT_LIST_HEAD(&imem->lru);
 	*pimem = &imem->base;
 	return 0;
 }
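
The core of the nv50 rework is nv50_instobj_kmap()'s allocate-or-evict loop: when BAR2 space runs out, idle mappings are pulled off the LRU and unmapped until the allocation succeeds or no victims remain, at which point accesses fall back to the slow path. A simplified sketch of that loop, with hypothetical types:

```c
#include <stdbool.h>
#include <stddef.h>

/* Simplified model of the allocate-or-evict loop: on allocation
 * failure, unmap the least-recently-used idle mapping and retry.
 * Types and sizes here are hypothetical.
 */
struct mapping {
	struct mapping *next;
	size_t size;
};

struct space {
	struct mapping *lru;	/* head = least recently used */
	size_t free;
};

static bool space_alloc(struct space *s, size_t size)
{
	if (s->free < size)
		return false;
	s->free -= size;
	return true;
}

static struct mapping *evict_one(struct space *s)
{
	struct mapping *victim = s->lru;
	if (victim) {
		s->lru = victim->next;	/* unmap: space is returned */
		s->free += victim->size;
	}
	return victim;
}

static bool map_with_eviction(struct space *s, size_t size)
{
	while (!space_alloc(s, size)) {
		if (!evict_one(s))
			return false;	/* LRU empty: use the slow path */
	}
	return true;
}
```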
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index ace4471864a3c..44651ca42d529 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -11,10 +11,22 @@ struct nvkm_instmem_func {
 	void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
 	int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
 			  bool zero, struct nvkm_memory **);
-	bool persistent;
 	bool zero;
 };
 
 void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
 		       int index, struct nvkm_instmem *);
+void nvkm_instmem_boot(struct nvkm_instmem *);
+
+#include <core/memory.h>
+
+struct nvkm_instobj {
+	struct nvkm_memory memory;
+	struct list_head head;
+	u32 *suspend;
+};
+
+void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+		       struct nvkm_instmem *, struct nvkm_instobj *);
+void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
 #endif
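
Each backend instobj now embeds struct nvkm_instobj, and the per-chipset macros recover the derived type from the embedded memory member with container_of(). The underlying C idiom, in a self-contained example:

```c
#include <stddef.h>
#include <stdio.h>

/* The embedding idiom behind the new instobj macros: derived objects
 * embed the base as a member, and container_of() recovers the derived
 * pointer from a pointer to the embedded member.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct derived {
	struct base base;	/* embedded, not a pointer */
	int extra;
};

int main(void)
{
	struct derived d = { .base = { .id = 1 }, .extra = 42 };
	struct base *b = &d.base;	/* what generic callbacks receive */
	struct derived *back = container_of(b, struct derived, base);

	printf("%d %d\n", back->base.id, back->extra);	/* 1 42 */
	return 0;
}
```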
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 0c7ef250dcafc..1f185274d3e63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,26 +23,12 @@
  */
 #include "priv.h"
 
-#include <subdev/fb.h>
-
-int
-nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
-{
-	int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
-	if (ret)
-		*pnode = NULL;
-	return ret;
-}
-
-void
-nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
-{
-	nvkm_mm_free(&ltc->tags, pnode);
-}
+#include <core/memory.h>
 
 void
-nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
+nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
 {
+	struct nvkm_ltc *ltc = device->ltc;
 	const u32 limit = first + count - 1;
 
 	BUG_ON((first > limit) || (limit >= ltc->num_tags));
@@ -116,10 +102,7 @@ static void *
 nvkm_ltc_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
-	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
-	nvkm_mm_fini(&ltc->tags);
-	if (ram)
-		nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+	nvkm_memory_unref(&ltc->tag_ram);
 	return ltc;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 4a0fa0a9b8028..a21ef45b85728 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,6 +23,7 @@
  */
 #include "priv.h"
 
+#include <core/memory.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
@@ -152,7 +153,10 @@ gf100_ltc_flush(struct nvkm_ltc *ltc)
 int
 gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+	struct nvkm_device *device = ltc->subdev.device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_ram *ram = fb->ram;
+	u32 bits = (nvkm_rd32(device, 0x100c80) & 0x00001000) ? 16 : 17;
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
@@ -164,8 +168,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	ltc->num_tags = (ram->size >> 17) / 4;
-	if (ltc->num_tags > (1 << 17))
-		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	if (ltc->num_tags > (1 << bits))
+		ltc->num_tags = 1 << bits; /* we have 16/17 bits in PTE */
 	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
 
 	tag_align = ltc->ltc_nr * 0x800;
@@ -181,14 +185,13 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	 */
 	tag_size  = (ltc->num_tags / 64) * 0x6000 + tag_margin;
 	tag_size += tag_align;
-	tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
-			   &ltc->tag_ram);
+	ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, tag_size,
+			   true, true, &ltc->tag_ram);
 	if (ret) {
 		ltc->num_tags = 0;
 	} else {
-		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
+		u64 tag_base = nvkm_memory_addr(ltc->tag_ram) + tag_margin;
 
 		tag_base += tag_align - 1;
 		do_div(tag_base, tag_align);
@@ -197,7 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	}
 
 mm_init:
-	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
+	nvkm_mm_fini(&fb->tags);
+	return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
 }
 
 int
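
For reference, the comptag sizing above allocates one tag per 128KiB for a quarter of VRAM, clamps to the 16- or 17-bit PTE field selected by register 0x100c80, and rounds up to a multiple of 64. A worked sketch of just that arithmetic (values illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Worked example of the sizing above: one comptag covers 128KiB
 * (1 << 17), tags are allocated for a quarter of VRAM, clamped to the
 * PTE field width and rounded up to a multiple of 64.
 */
static uint32_t num_tags(uint64_t vram_size, unsigned pte_bits)
{
	uint32_t n = (uint32_t)((vram_size >> 17) / 4);

	if (n > (1u << pte_bits))
		n = 1u << pte_bits;	/* limited by bits in the PTE */
	return (n + 63) & ~63u;		/* round up to 64 */
}

int main(void)
{
	/* 4GiB of VRAM with a 17-bit PTE field -> 8192 tags. */
	printf("%u\n", num_tags(4ull << 30, 17));
	return 0;
}
```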
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index 0bdfb2f40266d..e34d421080196 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -45,7 +45,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
 	ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
 	ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
 	/*XXX: tagram allocation - TBD */
-	return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+	return 0;
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 012c9db687b21..352a65f9371cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -3,4 +3,33 @@ nvkm-y += nvkm/subdev/mmu/nv04.o
 nvkm-y += nvkm/subdev/mmu/nv41.o
 nvkm-y += nvkm/subdev/mmu/nv44.o
 nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/g84.o
 nvkm-y += nvkm/subdev/mmu/gf100.o
+nvkm-y += nvkm/subdev/mmu/gk104.o
+nvkm-y += nvkm/subdev/mmu/gk20a.o
+nvkm-y += nvkm/subdev/mmu/gm200.o
+nvkm-y += nvkm/subdev/mmu/gm20b.o
+nvkm-y += nvkm/subdev/mmu/gp100.o
+nvkm-y += nvkm/subdev/mmu/gp10b.o
+
+nvkm-y += nvkm/subdev/mmu/mem.o
+nvkm-y += nvkm/subdev/mmu/memnv04.o
+nvkm-y += nvkm/subdev/mmu/memnv50.o
+nvkm-y += nvkm/subdev/mmu/memgf100.o
+
+nvkm-y += nvkm/subdev/mmu/vmm.o
+nvkm-y += nvkm/subdev/mmu/vmmnv04.o
+nvkm-y += nvkm/subdev/mmu/vmmnv41.o
+nvkm-y += nvkm/subdev/mmu/vmmnv44.o
+nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmgf100.o
+nvkm-y += nvkm/subdev/mmu/vmmgk104.o
+nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
+nvkm-y += nvkm/subdev/mmu/vmmgm200.o
+nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
+nvkm-y += nvkm/subdev/mmu/vmmgp100.o
+nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
+
+nvkm-y += nvkm/subdev/mmu/umem.o
+nvkm-y += nvkm/subdev/mmu/ummu.o
+nvkm-y += nvkm/subdev/mmu/uvmm.o
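
The rewritten mmu/base.c that follows replaces the old per-VM page-table arrays with a page-table cache (PTC) plus a sub-allocator (PTP) that carves small tables, such as GP100 LPTs, out of a 4KiB parent using a free-slot bitmask. A minimal standalone model of that bitmask scheme (__builtin_ctz() standing in for the kernel's __ffs()):

```c
#include <stdint.h>
#include <stdbool.h>

/* Standalone model of the PTP bitmask sub-allocator: a parent block
 * is carved into power-of-two slots tracked by a free mask; getting a
 * slot is find-first-set, and the parent can be freed once the mask
 * is full again.
 */
struct parent {
	uint16_t free;	/* bit set = slot free */
	uint16_t mask;	/* all valid slots */
	uint8_t shift;	/* log2(slot size) */
};

static void parent_init(struct parent *p, uint32_t block, uint32_t slot)
{
	p->shift = (uint8_t)__builtin_ctz(slot);
	p->mask = (uint16_t)((1u << (block >> p->shift)) - 1);
	p->free = p->mask;
}

static int slot_get(struct parent *p, uint32_t *base)
{
	unsigned slot;

	if (!p->free)
		return -1;	/* parent exhausted */
	slot = (unsigned)__builtin_ctz(p->free);
	p->free &= (uint16_t)~(1u << slot);
	*base = slot << p->shift;
	return 0;
}

static bool slot_put(struct parent *p, uint32_t base)
{
	p->free |= (uint16_t)(1u << (base >> p->shift));
	return p->free == p->mask;	/* true: parent itself can go */
}
```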
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 455da298227f6..ee11ccaf0563c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,480 +21,367 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ummu.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
 
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_mm_node *r = node->mem;
-	int big = vma->node->type != mmu->func->spg_shift;
-	u32 offset = vma->node->offset + (delta >> 12);
-	u32 bits = vma->node->type - 12;
-	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->func->pgt_bits - bits);
-	u32 end, len;
-
-	delta = 0;
-	while (r) {
-		u64 phys = (u64)r->offset << 12;
-		u32 num  = r->length >> bits;
-
-		while (num) {
-			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-			end = (pte + num);
-			if (unlikely(end >= max))
-				end = max;
-			len = end - pte;
-
-			mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-
-			num -= len;
-			pte += len;
-			if (unlikely(end >= max)) {
-				phys += len << (bits + 12);
-				pde++;
-				pte = 0;
-			}
-
-			delta += (u64)len << vma->node->type;
-		}
-		r = r->next;
-	};
-
-	mmu->func->flush(vm);
-}
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
 
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
-		     struct nvkm_mem *mem)
-{
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->func->spg_shift;
-	u32 offset = vma->node->offset + (delta >> 12);
-	u32 bits = vma->node->type - 12;
-	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->func->pgt_bits - bits);
-	unsigned m, sglen;
-	u32 end, len;
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
-		end = pte + sglen;
-		if (unlikely(end >= max))
-			end = max;
-		len = end - pte;
-
-		for (m = 0; m < len; m++) {
-			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
-			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-			num--;
-			pte++;
-
-			if (num == 0)
-				goto finish;
-		}
-		if (unlikely(end >= max)) {
-			pde++;
-			pte = 0;
-		}
-		if (m < sglen) {
-			for (; m < sglen; m++) {
-				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
-				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-				num--;
-				pte++;
-				if (num == 0)
-					goto finish;
-			}
-		}
-
-	}
-finish:
-	mmu->func->flush(vm);
-}
+struct nvkm_mmu_ptp {
+	struct nvkm_mmu_pt *pt;
+	struct list_head head;
+	u8  shift;
+	u16 mask;
+	u16 free;
+};
 
 static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
-	       struct nvkm_mem *mem)
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
 {
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
-	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != mmu->func->spg_shift;
-	u32 offset = vma->node->offset + (delta >> 12);
-	u32 bits = vma->node->type - 12;
-	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->func->pgt_bits - bits);
-	u32 end, len;
-
-	while (num) {
-		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-		end = (pte + num);
-		if (unlikely(end >= max))
-			end = max;
-		len = end - pte;
-
-		mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-
-		num  -= len;
-		pte  += len;
-		list += len;
-		if (unlikely(end >= max)) {
-			pde++;
-			pte = 0;
-		}
+	const int slot = pt->base >> pt->ptp->shift;
+	struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+	/* If there were no free slots in the parent allocation before,
+	 * there will be now, so return PTP to the cache.
+	 */
+	if (!ptp->free)
+		list_add(&ptp->head, &mmu->ptp.list);
+	ptp->free |= BIT(slot);
+
+	/* If there are no more sub-allocations, destroy PTP. */
+	if (ptp->free == ptp->mask) {
+		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+		list_del(&ptp->head);
+		kfree(ptp);
 	}
 
-	mmu->func->flush(vm);
+	kfree(pt);
 }
 
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
 {
-	if (node->sg)
-		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
-	else
-	if (node->pages)
-		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
-	else
-		nvkm_vm_map_at(vma, 0, node);
-}
+	struct nvkm_mmu_pt *pt;
+	struct nvkm_mmu_ptp *ptp;
+	int slot;
+
+	if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+		return NULL;
+
+	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+	if (!ptp) {
+		/* Need to allocate a new parent to sub-allocate from. */
+		if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+			kfree(pt);
+			return NULL;
+		}
 
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->func->spg_shift;
-	u32 offset = vma->node->offset + (delta >> 12);
-	u32 bits = vma->node->type - 12;
-	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->func->pgt_bits - bits);
-	u32 end, len;
-
-	while (num) {
-		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-		end = (pte + num);
-		if (unlikely(end >= max))
-			end = max;
-		len = end - pte;
-
-		mmu->func->unmap(vma, pgt, pte, len);
-
-		num -= len;
-		pte += len;
-		if (unlikely(end >= max)) {
-			pde++;
-			pte = 0;
+		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+		if (!ptp->pt) {
+			kfree(ptp);
+			kfree(pt);
+			return NULL;
 		}
-	}
 
-	mmu->func->flush(vm);
+		ptp->shift = order_base_2(size);
+		slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+		ptp->mask = (1 << slot) - 1;
+		ptp->free = ptp->mask;
+		list_add(&ptp->head, &mmu->ptp.list);
+	}
+	pt->ptp = ptp;
+	pt->sub = true;
+
+	/* Sub-allocate from parent object, removing PTP from cache
+	 * if there are no more free slots left.
+	 */
+	slot = __ffs(ptp->free);
+	ptp->free &= ~BIT(slot);
+	if (!ptp->free)
+		list_del(&ptp->head);
+
+	pt->memory = pt->ptp->pt->memory;
+	pt->base = slot << ptp->shift;
+	pt->addr = pt->ptp->pt->addr + pt->base;
+	return pt;
 }
 
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
-	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
+struct nvkm_mmu_ptc {
+	struct list_head head;
+	struct list_head item;
+	u32 size;
+	u32 refs;
+};
 
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_vm_pgd *vpgd;
-	struct nvkm_vm_pgt *vpgt;
-	struct nvkm_memory *pgt;
-	u32 pde;
-
-	for (pde = fpde; pde <= lpde; pde++) {
-		vpgt = &vm->pgt[pde - vm->fpde];
-		if (--vpgt->refcount[big])
-			continue;
-
-		pgt = vpgt->mem[big];
-		vpgt->mem[big] = NULL;
-
-		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
-		}
+	struct nvkm_mmu_ptc *ptc;
 
-		mmu->func->flush(vm);
+	list_for_each_entry(ptc, &mmu->ptc.list, head) {
+		if (ptc->size == size)
+			return ptc;
+	}
 
-		nvkm_memory_del(&pgt);
+	ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+	if (ptc) {
+		INIT_LIST_HEAD(&ptc->item);
+		ptc->size = size;
+		ptc->refs = 0;
+		list_add(&ptc->head, &mmu->ptc.list);
 	}
+
+	return ptc;
 }
 
-static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-	struct nvkm_vm_pgd *vpgd;
-	int big = (type != mmu->func->spg_shift);
-	u32 pgt_size;
-	int ret;
-
-	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
-	pgt_size *= 8;
-
-	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-			      pgt_size, 0x1000, true, &vpgt->mem[big]);
-	if (unlikely(ret))
-		return ret;
+	struct nvkm_mmu_pt *pt = *ppt;
+	if (pt) {
+		/* Handle sub-allocated page tables. */
+		if (pt->sub) {
+			mutex_lock(&mmu->ptp.mutex);
+			nvkm_mmu_ptp_put(mmu, force, pt);
+			mutex_unlock(&mmu->ptp.mutex);
+			return;
+		}
 
-	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
+		/* Either cache or free the object. */
+		mutex_lock(&mmu->ptc.mutex);
+		if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+			list_add_tail(&pt->head, &pt->ptc->item);
+			pt->ptc->refs++;
+		} else {
+			nvkm_memory_unref(&pt->memory);
+			kfree(pt);
+		}
+		mutex_unlock(&mmu->ptc.mutex);
 	}
-
-	vpgt->refcount[big]++;
-	return 0;
 }
 
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
-	    struct nvkm_vma *vma)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
-	u32 align = (1 << page_shift) >> 12;
-	u32 msize = size >> 12;
-	u32 fpde, lpde, pde;
+	struct nvkm_mmu_ptc *ptc;
+	struct nvkm_mmu_pt *pt;
 	int ret;
 
-	mutex_lock(&vm->mutex);
-	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
-			   &vma->node);
-	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mutex);
-		return ret;
+	/* Sub-allocated page table (ie. GP100 LPT). */
+	if (align < 0x1000) {
+		mutex_lock(&mmu->ptp.mutex);
+		pt = nvkm_mmu_ptp_get(mmu, align, zero);
+		mutex_unlock(&mmu->ptp.mutex);
+		return pt;
 	}
 
-	fpde = (vma->node->offset >> mmu->func->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+	/* Look up the cache for this page table size. */
+	mutex_lock(&mmu->ptc.mutex);
+	ptc = nvkm_mmu_ptc_find(mmu, size);
+	if (!ptc) {
+		mutex_unlock(&mmu->ptc.mutex);
+		return NULL;
+	}
 
-	for (pde = fpde; pde <= lpde; pde++) {
-		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != mmu->func->spg_shift);
+	/* If there's a free PT in the cache, reuse it. */
+	pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+	if (pt) {
+		if (zero)
+			nvkm_fo64(pt->memory, 0, 0, size >> 3);
+		list_del(&pt->head);
+		ptc->refs--;
+		mutex_unlock(&mmu->ptc.mutex);
+		return pt;
+	}
+	mutex_unlock(&mmu->ptc.mutex);
 
-		if (likely(vpgt->refcount[big])) {
-			vpgt->refcount[big]++;
-			continue;
-		}
+	/* No such luck, we need to allocate. */
+	if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+		return NULL;
+	pt->ptc = ptc;
+	pt->sub = false;
 
-		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
-		if (ret) {
-			if (pde != fpde)
-				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nvkm_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&vm->mutex);
-			return ret;
-		}
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      size, align, zero, &pt->memory);
+	if (ret) {
+		kfree(pt);
+		return NULL;
 	}
-	mutex_unlock(&vm->mutex);
 
-	vma->vm = NULL;
-	nvkm_vm_ref(vm, &vma->vm, NULL);
-	vma->offset = (u64)vma->node->offset << 12;
-	vma->access = access;
-	return 0;
+	pt->base = 0;
+	pt->addr = nvkm_memory_addr(pt->memory);
+	return pt;
 }
 
 void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
-	struct nvkm_mmu *mmu;
-	struct nvkm_vm *vm;
-	u32 fpde, lpde;
-
-	if (unlikely(vma->node == NULL))
-		return;
-	vm = vma->vm;
-	mmu = vm->mmu;
-
-	fpde = (vma->node->offset >> mmu->func->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
-	mutex_lock(&vm->mutex);
-	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
-	nvkm_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&vm->mutex);
-
-	nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_memory *pgt;
-	int ret;
-
-	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
-	if (ret == 0) {
-		vm->pgt[0].refcount[0] = 1;
-		vm->pgt[0].mem[0] = pgt;
-		nvkm_memory_boot(pgt, vm);
+	struct nvkm_mmu_ptc *ptc;
+	list_for_each_entry(ptc, &mmu->ptc.list, head) {
+		struct nvkm_mmu_pt *pt, *tt;
+		list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+			nvkm_memory_unref(&pt->memory);
+			list_del(&pt->head);
+			kfree(pt);
+		}
 	}
-
-	return ret;
 }
 
-int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
 {
-	static struct lock_class_key _key;
-	struct nvkm_vm *vm;
-	u64 mm_length = (offset + length) - mm_offset;
-	int ret;
-
-	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
-	if (!vm)
-		return -ENOMEM;
+	struct nvkm_mmu_ptc *ptc, *ptct;
 
-	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
-	INIT_LIST_HEAD(&vm->pgd_list);
-	vm->mmu = mmu;
-	kref_init(&vm->refcount);
-	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
-	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-
-	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
-	if (!vm->pgt) {
-		kfree(vm);
-		return -ENOMEM;
+	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+		WARN_ON(!list_empty(&ptc->item));
+		list_del(&ptc->head);
+		kfree(ptc);
 	}
-
-	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
-			   block >> 12);
-	if (ret) {
-		vfree(vm->pgt);
-		kfree(vm);
-		return ret;
-	}
-
-	*pvm = vm;
-
-	return 0;
 }
 
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-	    struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
 {
-	struct nvkm_mmu *mmu = device->mmu;
-	if (!mmu->func->create)
-		return -EINVAL;
-	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
+	mutex_init(&mmu->ptc.mutex);
+	INIT_LIST_HEAD(&mmu->ptc.list);
+	mutex_init(&mmu->ptp.mutex);
+	INIT_LIST_HEAD(&mmu->ptp.list);
 }
 
-static int
-nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_vm_pgd *vpgd;
-	int i;
-
-	if (!pgd)
-		return 0;
-
-	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
-	if (!vpgd)
-		return -ENOMEM;
-
-	vpgd->obj = pgd;
-
-	mutex_lock(&vm->mutex);
-	for (i = vm->fpde; i <= vm->lpde; i++)
-		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
-	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mutex);
-	return 0;
+	if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+		mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+		mmu->type[mmu->type_nr].heap = heap;
+		mmu->type_nr++;
+	}
 }
 
-static void
-nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
 {
-	struct nvkm_vm_pgd *vpgd, *tmp;
-
-	if (!mpgd)
-		return;
-
-	mutex_lock(&vm->mutex);
-	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		if (vpgd->obj == mpgd) {
-			list_del(&vpgd->head);
-			kfree(vpgd);
-			break;
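+	/* Register a heap of memory, returning an index used when
+	 * registering the memory types allocated from it.
+	 */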
+	if (size) {
+		if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+			mmu->heap[mmu->heap_nr].type = type;
+			mmu->heap[mmu->heap_nr].size = size;
+			return mmu->heap_nr++;
 		}
 	}
-	mutex_unlock(&vm->mutex);
+	return -EINVAL;
 }
 
 static void
-nvkm_vm_del(struct kref *kref)
+nvkm_mmu_host(struct nvkm_mmu *mmu)
 {
-	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
-	struct nvkm_vm_pgd *vpgd, *tmp;
-
-	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		nvkm_vm_unlink(vm, vpgd->obj);
-	}
-
-	nvkm_mm_fini(&vm->mm);
-	vfree(vm->pgt);
-	kfree(vm);
+	struct nvkm_device *device = mmu->subdev.device;
+	u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+	int heap;
+
+	/* Non-mappable system memory. */
+	heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+	nvkm_mmu_type(mmu, heap, type);
+
+	/* Non-coherent, cached, system memory.
+	 *
+	 * Block-linear mappings of system memory must be done through
+	 * BAR1, and cannot be supported on systems where we're unable
+	 * to map BAR1 with write-combining.
+	 */
+	type |= NVKM_MEM_MAPPABLE;
+	if (!device->bar || device->bar->iomap_uncached)
+		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+	else
+		nvkm_mmu_type(mmu, heap, type);
+
+	/* Coherent, cached, system memory.
+	 *
+	 * Unavailable on systems that cannot support snooped mappings,
+	 * and also for block-linear mappings, which must be done
+	 * through BAR1.
+	 */
+	type |= NVKM_MEM_COHERENT;
+	if (device->func->cpu_coherent)
+		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+	/* Uncached system memory. */
+	nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
 }
 
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
 {
-	if (ref) {
-		int ret = nvkm_vm_link(ref, pgd);
-		if (ret)
-			return ret;
-
-		kref_get(&ref->refcount);
-	}
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_mm *mm = &device->fb->ram->vram;
+	const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+	const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+	const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+	u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+	u8 heap = NVKM_MEM_VRAM;
+	int heapM, heapN, heapU;
+
+	/* Mixed-memory doesn't support compression or display. */
+	heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+	heap |= NVKM_MEM_COMP;
+	heap |= NVKM_MEM_DISP;
+	heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+	heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+	/* Add non-mappable VRAM types first so that they're preferred
+	 * over anything else.  Mixed-memory will be slower than other
+	 * heaps, so it's prioritised last.
+	 */
+	nvkm_mmu_type(mmu, heapU, type);
+	nvkm_mmu_type(mmu, heapN, type);
+	nvkm_mmu_type(mmu, heapM, type);
+
+	/* Add host memory types next, on the assumption that users
+	 * wanting mappable memory intend to use it for staging buffers
+	 * or the like.
+	 */
+	nvkm_mmu_host(mmu);
+
+	/* Mappable VRAM types go last, as they're basically the worst
+	 * possible type to ask for unless there's no other choice.
+	 */
+	if (device->bar) {
+		/* Write-combined BAR1 access. */
+		type |= NVKM_MEM_MAPPABLE;
+		if (!device->bar->iomap_uncached) {
+			nvkm_mmu_type(mmu, heapN, type);
+			nvkm_mmu_type(mmu, heapM, type);
+		}
 
-	if (*ptr) {
-		nvkm_vm_unlink(*ptr, pgd);
-		kref_put(&(*ptr)->refcount, nvkm_vm_del);
+		/* Uncached BAR1 access. */
+		type |= NVKM_MEM_COHERENT;
+		type |= NVKM_MEM_UNCACHED;
+		nvkm_mmu_type(mmu, heapN, type);
+		nvkm_mmu_type(mmu, heapM, type);
 	}
-
-	*ptr = ref;
-	return 0;
 }
 
 static int
 nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-	if (mmu->func->oneinit)
-		return mmu->func->oneinit(mmu);
+
+	/* Determine available memory types. */
+	if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+		nvkm_mmu_vram(mmu);
+	else
+		nvkm_mmu_host(mmu);
+
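+	/* Instantiate the single, shared address space ("gart") used
+	 * by all clients on GPUs without per-client virtual memory.
+	 */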
+	if (mmu->func->vmm.global) {
+		int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+				       "gart", &mmu->vmm);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -511,8 +398,10 @@ static void *
 nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-	if (mmu->func->dtor)
-		return mmu->func->dtor(mmu);
+
+	nvkm_vmm_unref(&mmu->vmm);
+
+	nvkm_mmu_ptc_fini(mmu);
 	return mmu;
 }
 
@@ -529,9 +418,10 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
 {
 	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
 	mmu->func = func;
-	mmu->limit = func->limit;
 	mmu->dma_bits = func->dma_bits;
-	mmu->lpg_shift = func->lpg_shift;
+	nvkm_mmu_ptc_init(mmu);
+	mmu->user.ctor = nvkm_ummu_new;
+	mmu->user.base = func->mmu.user;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
new file mode 100644
index 0000000000000..8accda5a772b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+g84_mmu = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
+	.kind = nv50_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 7ac507c927bb0..2d075246dc460 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,197 +21,65 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
 
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/timer.h>
-
-#include <core/gpuobj.h>
+#include <nvif/class.h>
 
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
-const u8 gf100_pte_storage_type_map[256] =
-{
-	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
-	0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
-	0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
-	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
-	0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
-	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
-	0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
-	0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
-	0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
-	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
-	0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
-	0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
-	0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
-	0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
-	0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
-	0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
-	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
-	0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
-};
-
-
-static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
-{
-	u32 pde[2] = { 0, 0 };
-
-	if (pgt[0])
-		pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
-	if (pgt[1])
-		pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
-
-	nvkm_kmap(pgd);
-	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
-	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
-	nvkm_done(pgd);
-}
-
-static inline u64
-gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
-	phys >>= 8;
-
-	phys |= 0x00000001; /* present */
-	if (vma->access & NV_MEM_ACCESS_SYS)
-		phys |= 0x00000002;
-
-	phys |= ((u64)target  << 32);
-	phys |= ((u64)memtype << 36);
-	return phys;
-}
-
-static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
-	u64 next = 1 << (vma->node->type - 8);
-
-	phys  = gf100_vm_addr(vma, phys, mem->memtype, 0);
-	pte <<= 3;
-
-	if (mem->tag) {
-		struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
-		u32 tag = mem->tag->offset + (delta >> 17);
-		phys |= (u64)tag << (32 + 12);
-		next |= (u64)1   << (32 + 12);
-		nvkm_ltc_tags_clear(ltc, tag, cnt);
-	}
-
-	nvkm_kmap(pgt);
-	while (cnt--) {
-		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
-		phys += next;
-		pte  += 8;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
-	/* compressed storage types are invalid for system memory */
-	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
-
-	nvkm_kmap(pgt);
-	pte <<= 3;
-	while (cnt--) {
-		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
-		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
-		pte += 8;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-	nvkm_kmap(pgt);
-	pte <<= 3;
-	while (cnt--) {
-		nvkm_wo32(pgt, pte + 0, 0x00000000);
-		nvkm_wo32(pgt, pte + 4, 0x00000000);
-		pte += 8;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-gf100_vm_flush(struct nvkm_vm *vm)
-{
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_device *device = mmu->subdev.device;
-	struct nvkm_vm_pgd *vpgd;
-	u32 type;
-
-	type = 0x00000001; /* PAGE_ALL */
-	if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
-		type |= 0x00000004; /* HUB_ONLY */
-
-	mutex_lock(&mmu->subdev.mutex);
-	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		/* looks like maybe a "free flush slots" counter, the
-		 * faster you write to 0x100cbc to more it decreases
-		 */
-		nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
-				break;
-		);
-
-		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
-		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
-
-		/* wait for flush to be queued? */
-		nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
-				break;
-		);
-	}
-	mutex_unlock(&mmu->subdev.mutex);
-}
-
-static int
-gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-		struct lock_class_key *key, struct nvkm_vm **pvm)
+const u8 *
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
 {
-	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
+	static const u8
+	kind[256] = {
+		0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+		0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+		0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+		0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+		0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+		0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+		0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+		0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+		0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+		0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+		0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+		0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+		0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+		0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+		0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+		0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+		0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+	};
+
+	*count = ARRAY_SIZE(kind);
+	return kind;
 }
 
 static const struct nvkm_mmu_func
 gf100_mmu = {
-	.limit = (1ULL << 40),
 	.dma_bits = 40,
-	.pgt_bits  = 27 - 12,
-	.spg_shift = 12,
-	.lpg_shift = 17,
-	.create = gf100_vm_create,
-	.map_pgt = gf100_vm_map_pgt,
-	.map = gf100_vm_map,
-	.map_sg = gf100_vm_map_sg,
-	.unmap = gf100_vm_unmap,
-	.flush = gf100_vm_flush,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
+	.kind = gf100_mmu_kind,
+	.kind_sys = true,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
new file mode 100644
index 0000000000000..3d7d1eb1cff90
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk104_mmu = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
+	.kind = gf100_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
new file mode 100644
index 0000000000000..ac74965a60d4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk20a_mmu = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
+	.kind = gf100_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
new file mode 100644
index 0000000000000..dbf644ebac97b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+const u8 *
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+{
+	static const u8
+	kind[256] = {
+		0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+		0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+		0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+		0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+		0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+		0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+		0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+		0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+		0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+		0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+		0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+		0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+		0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+		0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+		0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+		0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+		0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+	};
+	*count = ARRAY_SIZE(kind);
+	return kind;
+}
+
+static const struct nvkm_mmu_func
+gm200_mmu = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm200_mmu_fixed = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
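+/* The "fixed" variant is used when the FB has forced a specific big-page
+ * size (device->fb->page), which the VMM layout must then honour.
+ */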
+int
+gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->fb->page)
+		return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
+	return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
new file mode 100644
index 0000000000000..7353a94b40914
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gm20b_mmu = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm20b_mmu_fixed = {
+	.dma_bits = 40,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->fb->page)
+		return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
+	return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
new file mode 100644
index 0000000000000..651b8805c67c0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp100_mmu = {
+	.dma_bits = 47,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
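+/* Setting "GP100MmuLayout=0" falls back to the GM200-compatible MMU. */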
+int
+gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+		return gm200_mmu_new(device, index, pmmu);
+	return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
new file mode 100644
index 0000000000000..3bd3db31e0bbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp10b_mmu = {
+	.dma_bits = 47,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+		return gm20b_mmu_new(device, index, pmmu);
+	return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
new file mode 100644
index 0000000000000..39808489f21d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
+#include "mem.h"
+
+#include <core/memory.h>
+
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
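+/* Host-memory allocation, either backed by pages allocated below, or
+ * by caller-provided DMA addresses or a scatterlist.
+ */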
+struct nvkm_mem {
+	struct nvkm_memory memory;
+	enum nvkm_memory_target target;
+	struct nvkm_mmu *mmu;
+	u64 pages;
+	struct page **mem;
+	union {
+		struct scatterlist *sgl;
+		dma_addr_t *dma;
+	};
+};
+
+static enum nvkm_memory_target
+nvkm_mem_target(struct nvkm_memory *memory)
+{
+	return nvkm_mem(memory)->target;
+}
+
+static u8
+nvkm_mem_page(struct nvkm_memory *memory)
+{
+	return PAGE_SHIFT;
+}
+
+static u64
+nvkm_mem_addr(struct nvkm_memory *memory)
+{
+	struct nvkm_mem *mem = nvkm_mem(memory);
+	if (mem->pages == 1 && mem->mem)
+		return mem->dma[0];
+	return ~0ULL;
+}
+
+static u64
+nvkm_mem_size(struct nvkm_memory *memory)
+{
+	return nvkm_mem(memory)->pages << PAGE_SHIFT;
+}
+
+static int
+nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		 struct nvkm_vma *vma, void *argv, u32 argc)
+{
+	struct nvkm_mem *mem = nvkm_mem(memory);
+	struct nvkm_vmm_map map = {
+		.memory = &mem->memory,
+		.offset = offset,
+		.dma = mem->dma,
+	};
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static void *
+nvkm_mem_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_mem *mem = nvkm_mem(memory);
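+	/* Unmap and free any pages allocated by nvkm_mem_new_host(). */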
+	if (mem->mem) {
+		while (mem->pages--) {
+			dma_unmap_page(mem->mmu->subdev.device->dev,
+				       mem->dma[mem->pages], PAGE_SIZE,
+				       DMA_BIDIRECTIONAL);
+			__free_page(mem->mem[mem->pages]);
+		}
+		kvfree(mem->dma);
+		kvfree(mem->mem);
+	}
+	return mem;
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_dma = {
+	.dtor = nvkm_mem_dtor,
+	.target = nvkm_mem_target,
+	.page = nvkm_mem_page,
+	.addr = nvkm_mem_addr,
+	.size = nvkm_mem_size,
+	.map = nvkm_mem_map_dma,
+};
+
+static int
+nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		 struct nvkm_vma *vma, void *argv, u32 argc)
+{
+	struct nvkm_mem *mem = nvkm_mem(memory);
+	struct nvkm_vmm_map map = {
+		.memory = &mem->memory,
+		.offset = offset,
+		.sgl = mem->sgl,
+	};
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_sgl = {
+	.dtor = nvkm_mem_dtor,
+	.target = nvkm_mem_target,
+	.page = nvkm_mem_page,
+	.addr = nvkm_mem_addr,
+	.size = nvkm_mem_size,
+	.map = nvkm_mem_map_sgl,
+};
+
+int
+nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
+{
+	struct nvkm_mem *mem = nvkm_mem(memory);
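+	/* Only allocations backed by our own pages can be CPU-mapped. */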
+	if (mem->mem) {
+		*pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
+		return *pmap ? 0 : -EFAULT;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+		  void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+	struct device *dev = mmu->subdev.device->dev;
+	union {
+		struct nvif_mem_ram_vn vn;
+		struct nvif_mem_ram_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	enum nvkm_memory_target target;
+	struct nvkm_mem *mem;
+	gfp_t gfp = GFP_USER | __GFP_ZERO;
+
+	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
+	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))
+		target = NVKM_MEM_TARGET_HOST;
+	else
+		target = NVKM_MEM_TARGET_NCOH;
+
+	if (page != PAGE_SHIFT)
+		return -EINVAL;
+
+	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+		return -ENOMEM;
+	mem->target = target;
+	mem->mmu = mmu;
+	*pmemory = &mem->memory;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		if (args->v0.dma) {
+			nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+			mem->dma = args->v0.dma;
+		} else {
+			nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
+			mem->sgl = args->v0.sgl;
+		}
+
+		if (!IS_ALIGNED(size, PAGE_SIZE))
+			return -EINVAL;
+		mem->pages = size >> PAGE_SHIFT;
+		return 0;
+	} else
+	if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		/* No ctor has run yet; free directly, and clear the
+		 * caller's pointer so the failed allocation isn't
+		 * unreferenced after the free.
+		 */
+		kfree(mem);
+		*pmemory = NULL;
+		return ret;
+	}
+
+	nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+	size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	if (!(mem->mem = kvmalloc(sizeof(*mem->mem) * size, GFP_KERNEL)))
+		return -ENOMEM;
+	if (!(mem->dma = kvmalloc(sizeof(*mem->dma) * size, GFP_KERNEL)))
+		return -ENOMEM;
+
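+	/* Restrict allocations to the 32-bit DMA range unless the
+	 * device is able to address beyond it.
+	 */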
+	if (mmu->dma_bits > 32)
+		gfp |= GFP_HIGHUSER;
+	else
+		gfp |= GFP_DMA32;
+
+	for (mem->pages = 0; size; size--, mem->pages++) {
+		struct page *p = alloc_page(gfp);
+		if (!p)
+			return -ENOMEM;
+
+		mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
+						    p, 0, PAGE_SIZE,
+						    DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, mem->dma[mem->pages])) {
+			__free_page(p);
+			return -ENOMEM;
+		}
+
+		mem->mem[mem->pages] = p;
+	}
+
+	return 0;
+}
+
+int
+nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+		  void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = NULL;
+	int ret;
+
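+	/* VRAM is allocated through the backend; host memory through
+	 * the common allocator above.
+	 */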
+	if (mmu->type[type].type & NVKM_MEM_VRAM) {
+		ret = mmu->func->mem.vram(mmu, type, page, size,
+					  argv, argc, &memory);
+	} else {
+		ret = nvkm_mem_new_host(mmu, type, page, size,
+					argv, argc, &memory);
+	}
+
+	if (ret)
+		nvkm_memory_unref(&memory);
+	*pmemory = memory;
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
new file mode 100644
index 0000000000000..234267e1b215b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_MEM_H__
+#define __NVKM_MEM_H__
+#include "priv.h"
+
+int nvkm_mem_new_type(struct nvkm_mmu *, int type, u8 page, u64 size,
+		      void *argv, u32 argc, struct nvkm_memory **);
+int nvkm_mem_map_host(struct nvkm_memory *, void **pmap);
+
+int nv04_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+		 struct nvkm_memory **);
+int nv04_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+		 u64 *, u64 *, struct nvkm_vma **);
+
+int nv50_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+		 struct nvkm_memory **);
+int nv50_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+		 u64 *, u64 *, struct nvkm_vma **);
+
+int gf100_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+		  struct nvkm_memory **);
+int gf100_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+		  u64 *, u64 *, struct nvkm_vma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
new file mode 100644
index 0000000000000..d9c9bee452229
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+int
+gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+	      u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+	struct gf100_vmm_map_v0 uvmm = {};
+	union {
+		struct gf100_mem_map_vn vn;
+		struct gf100_mem_map_v0 v0;
+	} *args = argv;
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		uvmm.ro   = args->v0.ro;
+		uvmm.kind = args->v0.kind;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+	} else
+		return ret;
+
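+	/* Allocate BAR1 address space for the memory, and map it there. */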
+	ret = nvkm_vmm_get(bar, nvkm_memory_page(memory),
+				nvkm_memory_size(memory), pvma);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+	if (ret)
+		return ret;
+
+	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+	*psize = (*pvma)->size;
+	return 0;
+}
+
+int
+gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+	      void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+	union {
+		struct gf100_mem_vn vn;
+		struct gf100_mem_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	bool contig;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		contig = args->v0.contig;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		contig = false;
+	} else
+		return ret;
+
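+	/* Compression and display are only supported by the non-mixed heap. */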
+	if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
+		type = NVKM_RAM_MM_NORMAL;
+	else
+		type = NVKM_RAM_MM_MIXED;
+
+	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+			    size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
new file mode 100644
index 0000000000000..79a3b0cc9f5be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/fb.h>
+
+#include <nvif/if000b.h>
+#include <nvif/unpack.h>
+
+int
+nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+	     u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+	union {
+		struct nv04_mem_map_vn vn;
+	} *args = argv;
+	struct nvkm_device *device = mmu->subdev.device;
+	const u64 addr = nvkm_memory_addr(memory);
+	int ret = -ENOSYS;
+
+	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+		return ret;
+
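+	/* Pre-NV50 has no BAR1 VMM; VRAM appears directly in BAR1. */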
+	*paddr = device->func->resource_addr(device, 1) + addr;
+	*psize = nvkm_memory_size(memory);
+	*pvma = ERR_PTR(-ENODEV);
+	return 0;
+}
+
+int
+nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+	     void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+	union {
+		struct nv04_mem_vn vn;
+	} *args = argv;
+	int ret = -ENOSYS;
+
+	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+		return ret;
+
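+	/* Mappable allocations must come from the BAR-accessible heap. */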
+	if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
+		type = NVKM_RAM_MM_NORMAL;
+	else
+		type = NVKM_RAM_MM_NOMAP;
+
+	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+			    size, true, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
new file mode 100644
index 0000000000000..46759b89fc1fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+int
+nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+	     u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+	struct nv50_vmm_map_v0 uvmm = {};
+	union {
+		struct nv50_mem_map_vn vn;
+		struct nv50_mem_map_v0 v0;
+	} *args = argv;
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+	u64 size = nvkm_memory_size(memory);
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		uvmm.ro   = args->v0.ro;
+		uvmm.kind = args->v0.kind;
+		uvmm.comp = args->v0.comp;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+	} else
+		return ret;
+
+	ret = nvkm_vmm_get(bar, 12, size, pvma);
+	if (ret)
+		return ret;
+
+	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+	*psize = (*pvma)->size;
+	return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+}
+
+int
+nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+	     void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+	union {
+		struct nv50_mem_vn vn;
+		struct nv50_mem_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	bool contig;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
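+		/* bankswz requests bank-swizzled VRAM (storage type 0x02). */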
+		type   = args->v0.bankswz ? 0x02 : 0x01;
+		contig = args->v0.contig;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		type   = 0x01;
+		contig = false;
+	} else
+		return ret;
+
+	return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
+			    page, size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index 37927c3fdc3e6..d201c887c2cd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -21,129 +21,21 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
-
-#define NV04_PDMA_SIZE (128 * 1024 * 1024)
-#define NV04_PDMA_PAGE (  4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-	pte = 0x00008 + (pte * 4);
-	nvkm_kmap(pgt);
-	while (cnt) {
-		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
-		u32 phys = (u32)*list++;
-		while (cnt && page--) {
-			nvkm_wo32(pgt, pte, phys | 3);
-			phys += NV04_PDMA_PAGE;
-			pte += 4;
-			cnt -= 1;
-		}
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-	pte = 0x00008 + (pte * 4);
-	nvkm_kmap(pgt);
-	while (cnt--) {
-		nvkm_wo32(pgt, pte, 0x00000000);
-		pte += 4;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv04_vm_flush(struct nvkm_vm *vm)
-{
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv04_mmu_oneinit(struct nvkm_mmu *base)
-{
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	struct nvkm_memory *dma;
-	int ret;
-
-	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
-			      16, true, &dma);
-	mmu->vm->pgt[0].mem[0] = dma;
-	mmu->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	nvkm_kmap(dma);
-	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
-	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
-	nvkm_done(dma);
-	return 0;
-}
-
-void *
-nv04_mmu_dtor(struct nvkm_mmu *base)
-{
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	if (mmu->vm) {
-		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
-		nvkm_vm_ref(NULL, &mmu->vm, NULL);
-	}
-	if (mmu->nullp) {
-		dma_free_coherent(device->dev, 16 * 1024,
-				  mmu->nullp, mmu->null);
-	}
-	return mmu;
-}
-
-int
-nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_mmu **pmmu)
-{
-	struct nv04_mmu *mmu;
-	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
-		return -ENOMEM;
-	*pmmu = &mmu->base;
-	nvkm_mmu_ctor(func, device, index, &mmu->base);
-	return 0;
-}
+#include <nvif/class.h>
 
 const struct nvkm_mmu_func
 nv04_mmu = {
-	.oneinit = nv04_mmu_oneinit,
-	.dtor = nv04_mmu_dtor,
-	.limit = NV04_PDMA_SIZE,
 	.dma_bits = 32,
-	.pgt_bits = 32 - 12,
-	.spg_shift = 12,
-	.lpg_shift = 12,
-	.map_sg = nv04_vm_map_sg,
-	.unmap = nv04_vm_unmap,
-	.flush = nv04_vm_flush,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
 };
 
 int
 nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
deleted file mode 100644
index 363e33b296d56..0000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __NV04_MMU_PRIV__
-#define __NV04_MMU_PRIV__
-#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
-#include "priv.h"
-
-struct nv04_mmu {
-	struct nvkm_mmu base;
-	struct nvkm_vm *vm;
-	dma_addr_t null;
-	void *nullp;
-};
-
-int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
-		  int index, struct nvkm_mmu **);
-void *nv04_mmu_dtor(struct nvkm_mmu *);
-
-extern const struct nvkm_mmu_func nv04_mmu;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index c6a26f907009c..adca81895c09e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -21,113 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
 #include <core/option.h>
-#include <subdev/timer.h>
 
-#define NV41_GART_SIZE (512 * 1024 * 1024)
-#define NV41_GART_PAGE (  4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-	pte = pte * 4;
-	nvkm_kmap(pgt);
-	while (cnt) {
-		u32 page = PAGE_SIZE / NV41_GART_PAGE;
-		u64 phys = (u64)*list++;
-		while (cnt && page--) {
-			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
-			phys += NV41_GART_PAGE;
-			pte += 4;
-			cnt -= 1;
-		}
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-	pte = pte * 4;
-	nvkm_kmap(pgt);
-	while (cnt--) {
-		nvkm_wo32(pgt, pte, 0x00000000);
-		pte += 4;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv41_vm_flush(struct nvkm_vm *vm)
-{
-	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
-	struct nvkm_device *device = mmu->base.subdev.device;
-
-	mutex_lock(&mmu->base.subdev.mutex);
-	nvkm_wr32(device, 0x100810, 0x00000022);
-	nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x100810) & 0x00000020)
-			break;
-	);
-	nvkm_wr32(device, 0x100810, 0x00000000);
-	mutex_unlock(&mmu->base.subdev.mutex);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv41_mmu_oneinit(struct nvkm_mmu *base)
-{
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	int ret;
-
-	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
-			      &mmu->vm->pgt[0].mem[0]);
-	mmu->vm->pgt[0].refcount[0] = 1;
-	return ret;
-}
+#include <nvif/class.h>
 
 static void
-nv41_mmu_init(struct nvkm_mmu *base)
+nv41_mmu_init(struct nvkm_mmu *mmu)
 {
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
-	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+	struct nvkm_device *device = mmu->subdev.device;
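+	/* Point the GART at the global VMM's page table. */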
+	nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
 	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
 	nvkm_wr32(device, 0x100820, 0x00000000);
 }
 
 static const struct nvkm_mmu_func
 nv41_mmu = {
-	.dtor = nv04_mmu_dtor,
-	.oneinit = nv41_mmu_oneinit,
 	.init = nv41_mmu_init,
-	.limit = NV41_GART_SIZE,
 	.dma_bits = 39,
-	.pgt_bits = 32 - 12,
-	.spg_shift = 12,
-	.lpg_shift = 12,
-	.map_sg = nv41_vm_map_sg,
-	.unmap = nv41_vm_unmap,
-	.flush = nv41_vm_flush,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
 };
 
 int
@@ -137,5 +53,5 @@ nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 		return nv04_mmu_new(device, index, pmmu);
 
-	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index a648c2395545b..598c53a27bde4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -21,176 +21,18 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
 #include <core/option.h>
-#include <subdev/timer.h>
 
-#define NV44_GART_SIZE (512 * 1024 * 1024)
-#define NV44_GART_PAGE (  4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
-	     dma_addr_t *list, u32 pte, u32 cnt)
-{
-	u32 base = (pte << 2) & ~0x0000000f;
-	u32 tmp[4];
-
-	tmp[0] = nvkm_ro32(pgt, base + 0x0);
-	tmp[1] = nvkm_ro32(pgt, base + 0x4);
-	tmp[2] = nvkm_ro32(pgt, base + 0x8);
-	tmp[3] = nvkm_ro32(pgt, base + 0xc);
-
-	while (cnt--) {
-		u32 addr = list ? (*list++ >> 12) : (null >> 12);
-		switch (pte++ & 0x3) {
-		case 0:
-			tmp[0] &= ~0x07ffffff;
-			tmp[0] |= addr;
-			break;
-		case 1:
-			tmp[0] &= ~0xf8000000;
-			tmp[0] |= addr << 27;
-			tmp[1] &= ~0x003fffff;
-			tmp[1] |= addr >> 5;
-			break;
-		case 2:
-			tmp[1] &= ~0xffc00000;
-			tmp[1] |= addr << 22;
-			tmp[2] &= ~0x0001ffff;
-			tmp[2] |= addr >> 10;
-			break;
-		case 3:
-			tmp[2] &= ~0xfffe0000;
-			tmp[2] |= addr << 17;
-			tmp[3] &= ~0x00000fff;
-			tmp[3] |= addr >> 15;
-			break;
-		}
-	}
-
-	nvkm_wo32(pgt, base + 0x0, tmp[0]);
-	nvkm_wo32(pgt, base + 0x4, tmp[1]);
-	nvkm_wo32(pgt, base + 0x8, tmp[2]);
-	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
-}
-
-static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
-	u32 tmp[4];
-	int i;
-
-	nvkm_kmap(pgt);
-	if (pte & 3) {
-		u32  max = 4 - (pte & 3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, mmu->null, list, pte, part);
-		pte  += part;
-		list += part;
-		cnt  -= part;
-	}
-
-	while (cnt >= 4) {
-		for (i = 0; i < 4; i++)
-			tmp[i] = *list++ >> 12;
-		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
-		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
-		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
-		cnt -= 4;
-	}
-
-	if (cnt)
-		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
-	nvkm_done(pgt);
-}
-
-static void
-nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
-
-	nvkm_kmap(pgt);
-	if (pte & 3) {
-		u32  max = 4 - (pte & 3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
-		pte  += part;
-		cnt  -= part;
-	}
-
-	while (cnt >= 4) {
-		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-		cnt -= 4;
-	}
-
-	if (cnt)
-		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
-	nvkm_done(pgt);
-}
-
-static void
-nv44_vm_flush(struct nvkm_vm *vm)
-{
-	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
-	nvkm_wr32(device, 0x100808, 0x00000020);
-	nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x100808) & 0x00000001)
-			break;
-	);
-	nvkm_wr32(device, 0x100808, 0x00000000);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv44_mmu_oneinit(struct nvkm_mmu *base)
-{
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	int ret;
-
-	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
-					&mmu->null, GFP_KERNEL);
-	if (!mmu->nullp) {
-		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
-		mmu->null = 0;
-	}
-
-	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-			      512 * 1024, true,
-			      &mmu->vm->pgt[0].mem[0]);
-	mmu->vm->pgt[0].refcount[0] = 1;
-	return ret;
-}
+#include <nvif/class.h>
 
 static void
-nv44_mmu_init(struct nvkm_mmu *base)
+nv44_mmu_init(struct nvkm_mmu *mmu)
 {
-	struct nv04_mmu *mmu = nv04_mmu(base);
-	struct nvkm_device *device = mmu->base.subdev.device;
-	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
 	u32 addr;
 
 	/* calculate vram address of this PRAMIN block, object must be
@@ -198,11 +40,11 @@ nv44_mmu_init(struct nvkm_mmu *base)
 	 * of 512KiB for this to work correctly
 	 */
 	addr  = nvkm_rd32(device, 0x10020c);
-	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+	addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
 
 	nvkm_wr32(device, 0x100850, 0x80000000);
-	nvkm_wr32(device, 0x100818, mmu->null);
-	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+	nvkm_wr32(device, 0x100818, mmu->vmm->null);
+	nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
 	nvkm_wr32(device, 0x100850, 0x00008000);
 	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
 	nvkm_wr32(device, 0x100820, 0x00000000);
@@ -212,17 +54,11 @@ nv44_mmu_init(struct nvkm_mmu *base)
 
 static const struct nvkm_mmu_func
 nv44_mmu = {
-	.dtor = nv04_mmu_dtor,
-	.oneinit = nv44_mmu_oneinit,
 	.init = nv44_mmu_init,
-	.limit = NV44_GART_SIZE,
 	.dma_bits = 39,
-	.pgt_bits = 32 - 12,
-	.spg_shift = 12,
-	.lpg_shift = 12,
-	.map_sg = nv44_vm_map_sg,
-	.unmap = nv44_vm_unmap,
-	.flush = nv44_vm_flush,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 };
 
 int
@@ -232,5 +68,5 @@ nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 		return nv04_mmu_new(device, index, pmmu);
 
-	return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
 }
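
A worked example of the size programming in nv44_mmu_init() above; the
512MiB/4KiB figures are the NV44_GART_SIZE/NV44_GART_PAGE constants this
patch removes, so this is arithmetic only:

	/*   512MiB GART / 4KiB per page = 131072 PTEs
	 *   131072 PTEs * 4 bytes/PTE   = 512KiB page table
	 *
	 * and the 0x100804 write recovers the GART size from the PT:
	 *
	 *   (nvkm_memory_size(pt) / 4) * 4096
	 *     = (524288 / 4) * 4096 = 536870912 = 512MiB
	 */
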
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index a1f8d65f02761..db3dfbbb2aa08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,207 +21,52 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <engine/gr.h>
+#include <nvif/class.h>
 
-static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
+const u8 *
+nv50_mmu_kind(struct nvkm_mmu *base, int *count)
 {
-	u64 phys = 0xdeadcafe00000000ULL;
-	u32 coverage = 0;
-
-	if (pgt[0]) {
-		/* present, 4KiB pages */
-		phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
-		coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
-	} else
-	if (pgt[1]) {
-		/* present, 64KiB pages  */
-		phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
-		coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
-	}
-
-	if (phys & 1) {
-		if (coverage <= 32 * 1024 * 1024)
-			phys |= 0x60;
-		else if (coverage <= 64 * 1024 * 1024)
-			phys |= 0x40;
-		else if (coverage <= 128 * 1024 * 1024)
-			phys |= 0x20;
-	}
-
-	nvkm_kmap(pgd);
-	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
-	nvkm_done(pgd);
-}
-
-static inline u64
-vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
-	phys |= 1; /* present */
-	phys |= (u64)memtype << 40;
-	phys |= target << 4;
-	if (vma->access & NV_MEM_ACCESS_SYS)
-		phys |= (1 << 6);
-	if (!(vma->access & NV_MEM_ACCESS_WO))
-		phys |= (1 << 3);
-	return phys;
-}
-
-static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	    struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
-	struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
-	u32 comp = (mem->memtype & 0x180) >> 7;
-	u32 block, target;
-	int i;
-
-	/* IGPs don't have real VRAM, re-target to stolen system memory */
-	target = 0;
-	if (ram->stolen) {
-		phys += ram->stolen;
-		target = 3;
-	}
-
-	phys  = vm_addr(vma, phys, mem->memtype, target);
-	pte <<= 3;
-	cnt <<= 3;
-
-	nvkm_kmap(pgt);
-	while (cnt) {
-		u32 offset_h = upper_32_bits(phys);
-		u32 offset_l = lower_32_bits(phys);
-
-		for (i = 7; i >= 0; i--) {
-			block = 1 << (i + 3);
-			if (cnt >= block && !(pte & (block - 1)))
-				break;
-		}
-		offset_l |= (i << 7);
-
-		phys += block << (vma->node->type - 3);
-		cnt  -= block;
-		if (comp) {
-			u32 tag = mem->tag->offset + ((delta >> 16) * comp);
-			offset_h |= (tag << 17);
-			delta    += block << (vma->node->type - 3);
-		}
-
-		while (block) {
-			nvkm_wo32(pgt, pte + 0, offset_l);
-			nvkm_wo32(pgt, pte + 4, offset_h);
-			pte += 8;
-			block -= 8;
-		}
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
-	pte <<= 3;
-	nvkm_kmap(pgt);
-	while (cnt--) {
-		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
-		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
-		pte += 8;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-	pte <<= 3;
-	nvkm_kmap(pgt);
-	while (cnt--) {
-		nvkm_wo32(pgt, pte + 0, 0x00000000);
-		nvkm_wo32(pgt, pte + 4, 0x00000000);
-		pte += 8;
-	}
-	nvkm_done(pgt);
-}
-
-static void
-nv50_vm_flush(struct nvkm_vm *vm)
-{
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_subdev *subdev = &mmu->subdev;
-	struct nvkm_device *device = subdev->device;
-	int i, vme;
-
-	mutex_lock(&subdev->mutex);
-	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-		if (!atomic_read(&vm->engref[i]))
-			continue;
-
-		/* unfortunate hw bug workaround... */
-		if (i == NVKM_ENGINE_GR && device->gr) {
-			int ret = nvkm_gr_tlb_flush(device->gr);
-			if (ret != -ENODEV)
-				continue;
-		}
-
-		switch (i) {
-		case NVKM_ENGINE_GR    : vme = 0x00; break;
-		case NVKM_ENGINE_VP    :
-		case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
-		case NVKM_SUBDEV_BAR   : vme = 0x06; break;
-		case NVKM_ENGINE_MSPPP :
-		case NVKM_ENGINE_MPEG  : vme = 0x08; break;
-		case NVKM_ENGINE_BSP   :
-		case NVKM_ENGINE_MSVLD : vme = 0x09; break;
-		case NVKM_ENGINE_CIPHER:
-		case NVKM_ENGINE_SEC   : vme = 0x0a; break;
-		case NVKM_ENGINE_CE0   : vme = 0x0d; break;
-		default:
-			continue;
-		}
-
-		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
-		if (nvkm_msec(device, 2000,
-			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
-				break;
-		) < 0)
-			nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
-	}
-	mutex_unlock(&subdev->mutex);
-}
-
-static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       struct lock_class_key *key, struct nvkm_vm **pvm)
-{
-	u32 block = (1 << (mmu->func->pgt_bits + 12));
-	if (block > length)
-		block = length;
-
-	return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
+	/* 0x01: no bank swizzle
+	 * 0x02: bank swizzled
+	 * 0x7f: invalid
+	 *
+	 * 0x01/0x02 are values understood by the VRAM allocator,
+	 * and are required to avoid mixing the two types within
+	 * a certain range.
+	 */
+	static const u8
+	kind[128] = {
+		0x01, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x00 */
+		0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+		0x01, 0x01, 0x01, 0x01, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x10 */
+		0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f, 0x7f, 0x7f,
+		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x20 */
+		0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f,
+		0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x30 */
+		0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, /* 0x40 */
+		0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f,
+		0x7f, 0x7f, 0x7f, 0x7f, 0x01, 0x01, 0x01, 0x7f, /* 0x50 */
+		0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x60 */
+		0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
+		0x01, 0x7f, 0x02, 0x7f, 0x01, 0x7f, 0x02, 0x7f, /* 0x70 */
+		0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
+	};
+	*count = ARRAY_SIZE(kind);
+	return kind;
 }
 
 static const struct nvkm_mmu_func
 nv50_mmu = {
-	.limit = (1ULL << 40),
 	.dma_bits = 40,
-	.pgt_bits  = 29 - 12,
-	.spg_shift = 12,
-	.lpg_shift = 16,
-	.create = nv50_vm_create,
-	.map_pgt = nv50_vm_map_pgt,
-	.map = nv50_vm_map,
-	.map_sg = nv50_vm_map_sg,
-	.unmap = nv50_vm_unmap,
-	.flush = nv50_vm_flush,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
+	.kind = nv50_mmu_kind,
 };
 
 int
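
A sketch of how a caller might consult the new .kind hook; the helper
name is hypothetical, only the hook signature comes from priv.h below:

	/* Hypothetical helper: validate a client-supplied kind against
	 * the per-family table.  0x7f marks invalid entries, per the
	 * comment in nv50_mmu_kind() above.
	 */
	static bool
	kind_valid(struct nvkm_mmu *mmu, u8 kindv)
	{
		int count = 0;
		const u8 *kind = mmu->func->kind ?
				 mmu->func->kind(mmu, &count) : NULL;
		return kind && kindv < count && kind[kindv] != 0x7f;
	}
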
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 27cedc60b507b..d024d8055fcb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -9,31 +9,57 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
 		  int index, struct nvkm_mmu **);
 
 struct nvkm_mmu_func {
-	void *(*dtor)(struct nvkm_mmu *);
-	int (*oneinit)(struct nvkm_mmu *);
 	void (*init)(struct nvkm_mmu *);
 
-	u64 limit;
 	u8  dma_bits;
-	u32 pgt_bits;
-	u8  spg_shift;
-	u8  lpg_shift;
-
-	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
-		       struct lock_class_key *, struct nvkm_vm **);
-
-	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
-			struct nvkm_memory *pgt[2]);
-	void (*map)(struct nvkm_vma *, struct nvkm_memory *,
-		    struct nvkm_mem *, u32 pte, u32 cnt,
-		    u64 phys, u64 delta);
-	void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
-		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-	void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
-		      u32 pte, u32 cnt);
-	void (*flush)(struct nvkm_vm *);
+
+	struct {
+		struct nvkm_sclass user;
+	} mmu;
+
+	struct {
+		struct nvkm_sclass user;
+		int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
+			    void *argv, u32 argc, struct nvkm_memory **);
+		int (*umap)(struct nvkm_mmu *, struct nvkm_memory *, void *argv,
+			    u32 argc, u64 *addr, u64 *size, struct nvkm_vma **);
+	} mem;
+
+	struct {
+		struct nvkm_sclass user;
+		int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
+			    void *argv, u32 argc, struct lock_class_key *,
+			    const char *name, struct nvkm_vmm **);
+		bool global;
+		u32 pd_offset;
+	} vmm;
+
+	const u8 *(*kind)(struct nvkm_mmu *, int *count);
+	bool kind_sys;
+};
+
+extern const struct nvkm_mmu_func nv04_mmu;
+
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+
+struct nvkm_mmu_pt {
+	union {
+		struct nvkm_mmu_ptc *ptc;
+		struct nvkm_mmu_ptp *ptp;
+	};
+	struct nvkm_memory *memory;
+	bool sub;
+	u16 base;
+	u64 addr;
+	struct list_head head;
 };
 
-int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
-		   struct lock_class_key *, struct nvkm_vm **);
+void nvkm_mmu_ptc_dump(struct nvkm_mmu *);
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *, u32 size, u32 align, bool zero);
+void nvkm_mmu_ptc_put(struct nvkm_mmu *, bool force, struct nvkm_mmu_pt **);
 #endif
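
The ptc_get()/ptc_put() pair declared above is the new page-table
allocator/cache; a usage sketch assuming only the signatures shown
(whether "force" bypasses the cache on free is an assumption here):

	struct nvkm_mmu_pt *pt;

	/* Allocate a zeroed, 4KiB-aligned, 4KiB page table. */
	pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
	if (pt) {
		/* pt->memory backs the table; pt->addr is its address. */
		nvkm_mmu_ptc_put(mmu, false, &pt);	/* takes **pt so it can clear it */
	}
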
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
new file mode 100644
index 0000000000000..fac2f9a45ea69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_umem;
+struct nvkm_memory *
+nvkm_umem_search(struct nvkm_client *client, u64 handle)
+{
+	struct nvkm_client *master = client->object.client;
+	struct nvkm_memory *memory = NULL;
+	struct nvkm_object *object;
+	struct nvkm_umem *umem;
+
+	object = nvkm_object_search(client, handle, &nvkm_umem);
+	if (IS_ERR(object)) {
+		if (client->super && client != master) {
+			spin_lock(&master->lock);
+			list_for_each_entry(umem, &master->umem, head) {
+				if (umem->object.object == handle) {
+					memory = nvkm_memory_ref(umem->memory);
+					break;
+				}
+			}
+			spin_unlock(&master->lock);
+		}
+	} else {
+		umem = nvkm_umem(object);
+		if (!umem->priv || client->super)
+			memory = nvkm_memory_ref(umem->memory);
+	}
+
+	return memory ? memory : ERR_PTR(-ENOENT);
+}
+
+static int
+nvkm_umem_unmap(struct nvkm_object *object)
+{
+	struct nvkm_umem *umem = nvkm_umem(object);
+
+	if (!umem->map)
+		return -EEXIST;
+
+	if (umem->io) {
+		if (!IS_ERR(umem->bar)) {
+			struct nvkm_device *device = umem->mmu->subdev.device;
+			nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
+		} else {
+			umem->bar = NULL;
+		}
+	} else {
+		vunmap(umem->map);
+		umem->map = NULL;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
+	      enum nvkm_object_map *type, u64 *handle, u64 *length)
+{
+	struct nvkm_umem *umem = nvkm_umem(object);
+	struct nvkm_mmu *mmu = umem->mmu;
+
+	if (!umem->mappable)
+		return -EINVAL;
+	if (umem->map)
+		return -EEXIST;
+
+	if ((umem->type & NVKM_MEM_HOST) && !argc) {
+		int ret = nvkm_mem_map_host(umem->memory, &umem->map);
+		if (ret)
+			return ret;
+
+		*handle = (unsigned long)(void *)umem->map;
+		*length = nvkm_memory_size(umem->memory);
+		*type = NVKM_OBJECT_MAP_VA;
+		return 0;
+	} else
+	if ((umem->type & NVKM_MEM_VRAM) ||
+	    (umem->type & NVKM_MEM_KIND)) {
+		int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
+					      handle, length, &umem->bar);
+		if (ret)
+			return ret;
+
+		*type = NVKM_OBJECT_MAP_IO;
+	} else {
+		return -EINVAL;
+	}
+
+	umem->io = (*type == NVKM_OBJECT_MAP_IO);
+	return 0;
+}
+
+static void *
+nvkm_umem_dtor(struct nvkm_object *object)
+{
+	struct nvkm_umem *umem = nvkm_umem(object);
+	spin_lock(&umem->object.client->lock);
+	list_del_init(&umem->head);
+	spin_unlock(&umem->object.client->lock);
+	nvkm_memory_unref(&umem->memory);
+	return umem;
+}
+
+static const struct nvkm_object_func
+nvkm_umem = {
+	.dtor = nvkm_umem_dtor,
+	.map = nvkm_umem_map,
+	.unmap = nvkm_umem_unmap,
+};
+
+int
+nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+	      struct nvkm_object **pobject)
+{
+	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+	union {
+		struct nvif_mem_v0 v0;
+	} *args = argv;
+	struct nvkm_umem *umem;
+	int type, ret = -ENOSYS;
+	u8  page;
+	u64 size;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+		type = args->v0.type;
+		page = args->v0.page;
+		size = args->v0.size;
+	} else
+		return ret;
+
+	if (type >= mmu->type_nr)
+		return -EINVAL;
+
+	if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
+	umem->mmu = mmu;
+	umem->type = mmu->type[type].type;
+	umem->priv = oclass->client->super;
+	INIT_LIST_HEAD(&umem->head);
+	*pobject = &umem->object;
+
+	if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
+		page = max_t(u8, page, PAGE_SHIFT);
+		umem->mappable = true;
+	}
+
+	ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
+				&umem->memory);
+	if (ret)
+		return ret;
+
+	spin_lock(&umem->object.client->lock);
+	list_add(&umem->head, &umem->object.client->umem);
+	spin_unlock(&umem->object.client->lock);
+
+	args->v0.page = nvkm_memory_page(umem->memory);
+	args->v0.addr = nvkm_memory_addr(umem->memory);
+	args->v0.size = nvkm_memory_size(umem->memory);
+	return 0;
+}
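
One detail of nvkm_umem_new() above worth a worked example, the page
clamp for mappable types (assuming the common PAGE_SHIFT of 12):

	/* With 4KiB CPU pages (PAGE_SHIFT == 12), a requested GPU page
	 * shift below 12 is raised to 12, so the allocation can later
	 * be CPU-mapped on whole pages; a 64KiB request (page == 16)
	 * passes through untouched, as max_t() only raises the value.
	 */
	page = max_t(u8, page, PAGE_SHIFT);
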
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
new file mode 100644
index 0000000000000..85cf692d620a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_UMEM_H__
+#define __NVKM_UMEM_H__
+#define nvkm_umem(p) container_of((p), struct nvkm_umem, object)
+#include <core/object.h>
+#include "mem.h"
+
+struct nvkm_umem {
+	struct nvkm_object object;
+	struct nvkm_mmu *mmu;
+	u8 type:8;
+	bool priv:1;
+	bool mappable:1;
+	bool io:1;
+
+	struct nvkm_memory *memory;
+	struct list_head head;
+
+	union {
+		struct nvkm_vma *bar;
+		void *map;
+	};
+};
+
+int nvkm_umem_new(const struct nvkm_oclass *, void *argv, u32 argc,
+		  struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
new file mode 100644
index 0000000000000..353f10f92b774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ummu.h"
+#include "umem.h"
+#include "uvmm.h"
+
+#include <core/client.h>
+
+#include <nvif/if0008.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ummu_sclass(struct nvkm_object *object, int index,
+		 struct nvkm_oclass *oclass)
+{
+	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
+
+	if (mmu->func->mem.user.oclass && oclass->client->super) {
+		if (index-- == 0) {
+			oclass->base = mmu->func->mem.user;
+			oclass->ctor = nvkm_umem_new;
+			return 0;
+		}
+	}
+
+	if (mmu->func->vmm.user.oclass) {
+		if (index-- == 0) {
+			oclass->base = mmu->func->vmm.user;
+			oclass->ctor = nvkm_uvmm_new;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int
+nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+	struct nvkm_mmu *mmu = ummu->mmu;
+	union {
+		struct nvif_mmu_heap_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	u8 index;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		if ((index = args->v0.index) >= mmu->heap_nr)
+			return -EINVAL;
+		args->v0.size = mmu->heap[index].size;
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+	struct nvkm_mmu *mmu = ummu->mmu;
+	union {
+		struct nvif_mmu_type_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	u8 type, index;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		if ((index = args->v0.index) >= mmu->type_nr)
+			return -EINVAL;
+		type = mmu->type[index].type;
+		args->v0.heap = mmu->type[index].heap;
+		args->v0.vram = !!(type & NVKM_MEM_VRAM);
+		args->v0.host = !!(type & NVKM_MEM_HOST);
+		args->v0.comp = !!(type & NVKM_MEM_COMP);
+		args->v0.disp = !!(type & NVKM_MEM_DISP);
+		args->v0.kind = !!(type & NVKM_MEM_KIND);
+		args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
+		args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
+		args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+	struct nvkm_mmu *mmu = ummu->mmu;
+	union {
+		struct nvif_mmu_kind_v0 v0;
+	} *args = argv;
+	const u8 *kind = NULL;
+	int ret = -ENOSYS, count = 0;
+
+	if (mmu->func->kind)
+		kind = mmu->func->kind(mmu, &count);
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+		if (argc != args->v0.count * sizeof(*args->v0.data))
+			return -EINVAL;
+		if (args->v0.count > count)
+			return -EINVAL;
+		memcpy(args->v0.data, kind, args->v0.count);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+	struct nvkm_ummu *ummu = nvkm_ummu(object);
+	switch (mthd) {
+	case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
+	case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
+	case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_ummu = {
+	.mthd = nvkm_ummu_mthd,
+	.sclass = nvkm_ummu_sclass,
+};
+
+int
+nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+	      void *argv, u32 argc, struct nvkm_object **pobject)
+{
+	union {
+		struct nvif_mmu_v0 v0;
+	} *args = argv;
+	struct nvkm_mmu *mmu = device->mmu;
+	struct nvkm_ummu *ummu;
+	int ret = -ENOSYS, kinds = 0;
+
+	if (mmu->func->kind)
+		mmu->func->kind(mmu, &kinds);
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		args->v0.dmabits = mmu->dma_bits;
+		args->v0.heap_nr = mmu->heap_nr;
+		args->v0.type_nr = mmu->type_nr;
+		args->v0.kind_nr = kinds;
+	} else
+		return ret;
+
+	if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
+	ummu->mmu = mmu;
+	*pobject = &ummu->object;
+	return 0;
+}
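
A sketch of driving the object's methods from the nvif side;
nvif_mthd() is the existing nvif entry point, but the nvif-side
wrapper's member name (mmu->object) is an assumption here:

	struct nvif_mmu_type_v0 args = { .index = 0 };
	int ret;

	/* Query the properties of memory type 0, answered by
	 * nvkm_ummu_type() above.
	 */
	ret = nvif_mthd(&mmu->object, NVIF_MMU_V0_TYPE, &args, sizeof(args));
	if (ret == 0 && args.vram && args.mappable) {
		/* type 0 is BAR-mappable VRAM */
	}
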
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
new file mode 100644
index 0000000000000..0cd510dcfc680
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UMMU_H__
+#define __NVKM_UMMU_H__
+#define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
+#include <core/object.h>
+#include "priv.h"
+
+struct nvkm_ummu {
+	struct nvkm_object object;
+	struct nvkm_mmu *mmu;
+};
+
+int nvkm_ummu_new(struct nvkm_device *, const struct nvkm_oclass *,
+		  void *argv, u32 argc, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
new file mode 100644
index 0000000000000..fa81d0c1ba415
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "uvmm.h"
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+
+#include <nvif/if000c.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_uvmm;
+struct nvkm_vmm *
+nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
+{
+	struct nvkm_object *object;
+
+	object = nvkm_object_search(client, handle, &nvkm_uvmm);
+	if (IS_ERR(object))
+		return (void *)object;
+
+	return nvkm_uvmm(object)->vmm;
+}
+
+static int
+nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_unmap_v0 v0;
+	} *args = argv;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	struct nvkm_vma *vma;
+	int ret = -ENOSYS;
+	u64 addr;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		addr = args->v0.addr;
+	} else
+		return ret;
+
+	mutex_lock(&vmm->mutex);
+	vma = nvkm_vmm_node_search(vmm, addr);
+	if (ret = -ENOENT, !vma || vma->addr != addr) {
+		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
+			  addr, vma ? vma->addr : ~0ULL);
+		goto done;
+	}
+
+	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+			  vma->user, !client->super, vma->busy);
+		goto done;
+	}
+
+	if (ret = -EINVAL, !vma->memory) {
+		VMM_DEBUG(vmm, "unmapped");
+		goto done;
+	}
+
+	nvkm_vmm_unmap_locked(vmm, vma);
+	ret = 0;
+done:
+	mutex_unlock(&vmm->mutex);
+	return ret;
+}
+
+static int
+nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_map_v0 v0;
+	} *args = argv;
+	u64 addr, size, handle, offset;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	struct nvkm_vma *vma;
+	struct nvkm_memory *memory;
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+		addr = args->v0.addr;
+		size = args->v0.size;
+		handle = args->v0.memory;
+		offset = args->v0.offset;
+	} else
+		return ret;
+
+	if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+		VMM_DEBUG(vmm, "memory %016llx %ld", handle, PTR_ERR(memory));
+		return PTR_ERR(memory);
+	}
+
+	mutex_lock(&vmm->mutex);
+	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
+		VMM_DEBUG(vmm, "lookup %016llx", addr);
+		goto fail;
+	}
+
+	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+			  vma->user, !client->super, vma->busy);
+		goto fail;
+	}
+
+	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
+		if (addr + size > vma->addr + vma->size || vma->memory ||
+		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
+			VMM_DEBUG(vmm, "split %d %d %d "
+				       "%016llx %016llx %016llx %016llx",
+				  !!vma->memory, vma->refd, vma->mapref,
+				  addr, size, vma->addr, (u64)vma->size);
+			goto fail;
+		}
+
+		if (vma->addr != addr) {
+			const u64 tail = vma->size + vma->addr - addr;
+			if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
+				goto fail;
+			vma->part = true;
+			nvkm_vmm_node_insert(vmm, vma);
+		}
+
+		if (vma->size != size) {
+			const u64 tail = vma->size - size;
+			struct nvkm_vma *tmp;
+			if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
+				nvkm_vmm_unmap_region(vmm, vma);
+				goto fail;
+			}
+			tmp->part = true;
+			nvkm_vmm_node_insert(vmm, tmp);
+		}
+	}
+	vma->busy = true;
+	mutex_unlock(&vmm->mutex);
+
+	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
+	if (ret == 0) {
+		/* Successful map will clear vma->busy. */
+		nvkm_memory_unref(&memory);
+		return 0;
+	}
+
+	mutex_lock(&vmm->mutex);
+	vma->busy = false;
+	nvkm_vmm_unmap_region(vmm, vma);
+fail:
+	mutex_unlock(&vmm->mutex);
+	nvkm_memory_unref(&memory);
+	return ret;
+}
+
+static int
+nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_put_v0 v0;
+	} *args = argv;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	struct nvkm_vma *vma;
+	int ret = -ENOSYS;
+	u64 addr;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		addr = args->v0.addr;
+	} else
+		return ret;
+
+	mutex_lock(&vmm->mutex);
+	vma = nvkm_vmm_node_search(vmm, addr);
+	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
+		VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
+			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
+		goto done;
+	}
+
+	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+			  vma->user, !client->super, vma->busy);
+		goto done;
+	}
+
+	nvkm_vmm_put_locked(vmm, vma);
+	ret = 0;
+done:
+	mutex_unlock(&vmm->mutex);
+	return ret;
+}
+
+static int
+nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_get_v0 v0;
+	} *args = argv;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	struct nvkm_vma *vma;
+	int ret = -ENOSYS;
+	bool getref, mapref, sparse;
+	u8 page, align;
+	u64 size;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
+		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
+		sparse = args->v0.sparse;
+		page = args->v0.page;
+		align = args->v0.align;
+		size = args->v0.size;
+	} else
+		return ret;
+
+	mutex_lock(&vmm->mutex);
+	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
+				  page, align, size, &vma);
+	mutex_unlock(&vmm->mutex);
+	if (ret)
+		return ret;
+
+	args->v0.addr = vma->addr;
+	vma->user = !client->super;
+	return ret;
+}
+
+static int
+nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	union {
+		struct nvif_vmm_page_v0 v0;
+	} *args = argv;
+	const struct nvkm_vmm_page *page;
+	int ret = -ENOSYS;
+	u8 type, index, nr;
+
+	page = uvmm->vmm->func->page;
+	for (nr = 0; page[nr].shift; nr++);
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		if ((index = args->v0.index) >= nr)
+			return -EINVAL;
+		type = page[index].type;
+		args->v0.shift = page[index].shift;
+		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
+		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
+		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
+		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+	switch (mthd) {
+	case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
+	case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
+	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
+	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
+	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static void *
+nvkm_uvmm_dtor(struct nvkm_object *object)
+{
+	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+	nvkm_vmm_unref(&uvmm->vmm);
+	return uvmm;
+}
+
+static const struct nvkm_object_func
+nvkm_uvmm = {
+	.dtor = nvkm_uvmm_dtor,
+	.mthd = nvkm_uvmm_mthd,
+};
+
+int
+nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+	      struct nvkm_object **pobject)
+{
+	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+	const bool more = oclass->base.maxver >= 0;
+	union {
+		struct nvif_vmm_v0 v0;
+	} *args = argv;
+	const struct nvkm_vmm_page *page;
+	struct nvkm_uvmm *uvmm;
+	int ret = -ENOSYS;
+	u64 addr, size;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+		addr = args->v0.addr;
+		size = args->v0.size;
+	} else
+		return ret;
+
+	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
+	*pobject = &uvmm->object;
+
+	if (!mmu->vmm) {
+		ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
+					  NULL, "user", &uvmm->vmm);
+		if (ret)
+			return ret;
+
+		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
+	} else {
+		if (size)
+			return -EINVAL;
+
+		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
+	}
+
+	page = uvmm->vmm->func->page;
+	args->v0.page_nr = 0;
+	while (page && (page++)->shift)
+		args->v0.page_nr++;
+	args->v0.addr = uvmm->vmm->start;
+	args->v0.size = uvmm->vmm->limit;
+	return 0;
+}
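
Taken together, the four methods above implement the following
lifecycle (a summary of the code, not a new interface):

	/*   NVIF_VMM_V0_GET    reserve address space  -> nvkm_vmm_get_locked()
	 *   NVIF_VMM_V0_MAP    attach backing memory  -> nvkm_memory_map()
	 *   NVIF_VMM_V0_UNMAP  detach backing memory  -> nvkm_vmm_unmap_locked()
	 *   NVIF_VMM_V0_PUT    release address space  -> nvkm_vmm_put_locked()
	 *
	 * Note that the map path deliberately drops vmm->mutex around
	 * nvkm_memory_map(), using vma->busy as the guard; the other
	 * methods refuse to touch a VMA while busy is set.
	 */
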
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
new file mode 100644
index 0000000000000..71dab55e18a9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UVMM_H__
+#define __NVKM_UVMM_H__
+#define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
+#include <core/object.h>
+#include "vmm.h"
+
+struct nvkm_uvmm {
+	struct nvkm_object object;
+	struct nvkm_vmm *vmm;
+};
+
+int nvkm_uvmm_new(const struct nvkm_oclass *, void *argv, u32 argc,
+		  struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
new file mode 100644
index 0000000000000..6446bc61b084a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -0,0 +1,1513 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define NVKM_VMM_LEVELS_MAX 5
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+static void
+nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
+{
+	struct nvkm_vmm_pt *pgt = *ppgt;
+	if (pgt) {
+		kvfree(pgt->pde);
+		kfree(pgt);
+		*ppgt = NULL;
+	}
+}
+
+static struct nvkm_vmm_pt *
+nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
+		const struct nvkm_vmm_page *page)
+{
+	const u32 pten = 1 << desc->bits;
+	struct nvkm_vmm_pt *pgt;
+	u32 lpte = 0;
+
+	if (desc->type > PGT) {
+		if (desc->type == SPT) {
+			const struct nvkm_vmm_desc *pair = page[-1].desc;
+			lpte = pten >> (desc->bits - pair->bits);
+		} else {
+			lpte = pten;
+		}
+	}
+
+	if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
+		return NULL;
+	pgt->page = page ? page->shift : 0;
+	pgt->sparse = sparse;
+
+	if (desc->type == PGD) {
+		pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+		if (!pgt->pde) {
+			kfree(pgt);
+			return NULL;
+		}
+	}
+
+	return pgt;
+}
+
+struct nvkm_vmm_iter {
+	const struct nvkm_vmm_page *page;
+	const struct nvkm_vmm_desc *desc;
+	struct nvkm_vmm *vmm;
+	u64 cnt;
+	u16 max, lvl;
+	u32 pte[NVKM_VMM_LEVELS_MAX];
+	struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
+	int flush;
+};
+
+#ifdef CONFIG_NOUVEAU_DEBUG_MMU
+static const char *
+nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
+{
+	switch (desc->type) {
+	case PGD: return "PGD";
+	case PGT: return "PGT";
+	case SPT: return "SPT";
+	case LPT: return "LPT";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static void
+nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
+{
+	int lvl;
+	for (lvl = it->max; lvl >= 0; lvl--) {
+		if (lvl >= it->lvl)
+			buf += sprintf(buf, "%05x:", it->pte[lvl]);
+		else
+			buf += sprintf(buf, "xxxxx:");
+	}
+}
+
+#define TRA(i,f,a...) do {                                                     \
+	char _buf[NVKM_VMM_LEVELS_MAX * 7];                                    \
+	struct nvkm_vmm_iter *_it = (i);                                       \
+	nvkm_vmm_trace(_it, _buf);                                             \
+	VMM_TRACE(_it->vmm, "%s "f, _buf, ##a);                                \
+} while (0)
+#else
+#define TRA(i,f,a...)
+#endif
+
+static inline void
+nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
+{
+	it->flush = min(it->flush, it->max - it->lvl);
+}
+
+static inline void
+nvkm_vmm_flush(struct nvkm_vmm_iter *it)
+{
+	if (it->flush != NVKM_VMM_LEVELS_MAX) {
+		if (it->vmm->func->flush) {
+			TRA(it, "flush: %d", it->flush);
+			it->vmm->func->flush(it->vmm, it->flush);
+		}
+		it->flush = NVKM_VMM_LEVELS_MAX;
+	}
+}
+
+static void
+nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
+{
+	const struct nvkm_vmm_desc *desc = it->desc;
+	const int type = desc[it->lvl].type == SPT;
+	struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
+	struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
+	struct nvkm_mmu_pt *pt = pgt->pt[type];
+	struct nvkm_vmm *vmm = it->vmm;
+	u32 pdei = it->pte[it->lvl + 1];
+
+	/* Recurse up the tree, unreferencing/destroying unneeded PDs. */
+	it->lvl++;
+	if (--pgd->refs[0]) {
+		const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
+		/* PD has other valid PDEs, so we need a proper update. */
+		TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+		pgt->pt[type] = NULL;
+		if (!pgt->refs[!type]) {
+			/* PDE no longer required. */
+			if (pgd->pt[0]) {
+				if (pgt->sparse) {
+					func->sparse(vmm, pgd->pt[0], pdei, 1);
+					pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
+				} else {
+					func->unmap(vmm, pgd->pt[0], pdei, 1);
+					pgd->pde[pdei] = NULL;
+				}
+			} else {
+				/* Special handling for Tesla-class GPUs,
+				 * where there's no central PD, but each
+				 * instance has its own embedded PD.
+				 */
+				func->pde(vmm, pgd, pdei);
+				pgd->pde[pdei] = NULL;
+			}
+		} else {
+			/* PDE was pointing at dual-PTs and we're removing
+			 * one of them, leaving the other in place.
+			 */
+			func->pde(vmm, pgd, pdei);
+		}
+
+		/* GPU may have cached the PTs, flush before freeing. */
+		nvkm_vmm_flush_mark(it);
+		nvkm_vmm_flush(it);
+	} else {
+		/* PD has no valid PDEs left, so we can just destroy it. */
+		nvkm_vmm_unref_pdes(it);
+	}
+
+	/* Destroy PD/PT. */
+	TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+	nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
+	if (!pgt->refs[!type])
+		nvkm_vmm_pt_del(&pgt);
+	it->lvl--;
+}
+
+static void
+nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+		     const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+	const u32 sptb = desc->bits - pair->bits;
+	const u32 sptn = 1 << sptb;
+	struct nvkm_vmm *vmm = it->vmm;
+	u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+	/* Determine how many SPTEs are being touched under each LPTE,
+	 * and drop reference counts.
+	 */
+	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+		const u32 pten = min(sptn - spti, ptes);
+		pgt->pte[lpti] -= pten;
+		ptes -= pten;
+	}
+
+	/* We're done here if there's no corresponding LPT. */
+	if (!pgt->refs[0])
+		return;
+
+	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+		/* Skip over any LPTEs that still have valid SPTEs. */
+		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
+			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+				if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
+					break;
+			}
+			continue;
+		}
+
+		/* As there are no more non-UNMAPPED SPTEs left in the range
+		 * covered by a number of LPTEs, the LPTEs once again take
+		 * control over their address range.
+		 *
+		 * Determine how many LPTEs need to transition state.
+		 */
+		pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+			if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
+				break;
+			pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+		}
+
+		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+			TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
+			pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
+		} else
+		if (pair->func->invalid) {
+			/* If the MMU supports it, restore the LPTE to the
+			 * INVALID state to tell the MMU there is no point
+			 * trying to fetch the corresponding SPTEs.
+			 */
+			TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
+			pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+		}
+	}
+}
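
A worked example of the accounting loop above, with hypothetical level
widths (no specific GPU implied):

	/* With desc->bits = 10 and pair->bits = 5: sptb = 5, so
	 * sptn = 32 SPTEs share each LPTE.  Unreferencing ptei = 30,
	 * ptes = 40 then touches:
	 *
	 *   lpti 0: spti = 30, pten = min(32 - 30, 40) = 2   pte[0] -= 2
	 *   lpti 1: spti = 0,  pten = min(32, 38)      = 32  pte[1] -= 32
	 *   lpti 2: spti = 0,  pten = min(32, 6)       = 6   pte[2] -= 6
	 *
	 * The second loop then walks LPTEs 0..2, and any whose SPTE
	 * counter hit zero flip back to SPARSE or INVALID.
	 */
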
+
+static bool
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+	const struct nvkm_vmm_desc *desc = it->desc;
+	const int type = desc->type == SPT;
+	struct nvkm_vmm_pt *pgt = it->pt[0];
+
+	/* Drop PTE references. */
+	pgt->refs[type] -= ptes;
+
+	/* Dual-PTs need special handling, unless the PDE is becoming invalid. */
+	if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
+		nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
+
+	/* PT no longer needed?  Destroy it. */
+	if (!pgt->refs[type]) {
+		it->lvl++;
+		TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
+		it->lvl--;
+		nvkm_vmm_unref_pdes(it);
+		return false; /* PTE writes for unmap() not necessary. */
+	}
+
+	return true;
+}
+
+static void
+nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+		   const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+	const u32 sptb = desc->bits - pair->bits;
+	const u32 sptn = 1 << sptb;
+	struct nvkm_vmm *vmm = it->vmm;
+	u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+	/* Determine how many SPTEs are being touched under each LPTE,
+	 * and increase reference counts.
+	 */
+	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+		const u32 pten = min(sptn - spti, ptes);
+		pgt->pte[lpti] += pten;
+		ptes -= pten;
+	}
+
+	/* We're done here if there's no corresponding LPT. */
+	if (!pgt->refs[0])
+		return;
+
+	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+		/* Skip over any LPTEs that already have valid SPTEs. */
+		if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
+			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+				if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
+					break;
+			}
+			continue;
+		}
+
+		/* As there are now non-UNMAPPED SPTEs in the range covered
+		 * by a number of LPTEs, we need to transfer control of the
+		 * address range to the SPTEs.
+		 *
+		 * Determine how many LPTEs need to transition state.
+		 */
+		pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+			if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
+				break;
+			pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+		}
+
+		if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+			const u32 spti = pteb * sptn;
+			const u32 sptc = ptes * sptn;
+			/* The entire LPTE is marked as sparse, so we need
+			 * to make sure that the SPTEs are too.
+			 */
+			TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
+			desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
+			/* Sparse LPTEs prevent SPTEs from being accessed. */
+			TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
+			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+		} else
+		if (pair->func->invalid) {
+			/* MMU supports blocking SPTEs by marking an LPTE
+			 * as INVALID.  We need to reverse that here.
+			 */
+			TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
+			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+		}
+	}
+}
+
+static bool
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+	const struct nvkm_vmm_desc *desc = it->desc;
+	const int type = desc->type == SPT;
+	struct nvkm_vmm_pt *pgt = it->pt[0];
+
+	/* Take PTE references. */
+	pgt->refs[type] += ptes;
+
+	/* Dual-PTs need special handling. */
+	if (desc->type == SPT)
+		nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
+
+	return true;
+}
+
+static void
+nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
+		     struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
+{
+	if (desc->type == PGD) {
+		while (ptes--)
+			pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
+	} else
+	if (desc->type == LPT) {
+		memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
+	}
+}
+
+static bool
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+	struct nvkm_vmm_pt *pt = it->pt[0];
+	if (it->desc->type == PGD)
+		memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
+	else
+	if (it->desc->type == LPT)
+		memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
+	return nvkm_vmm_unref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+	nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
+	return nvkm_vmm_ref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+	const int type = desc->type == SPT;
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	const bool zero = !pgt->sparse && !desc->func->invalid;
+	struct nvkm_vmm *vmm = it->vmm;
+	struct nvkm_mmu *mmu = vmm->mmu;
+	struct nvkm_mmu_pt *pt;
+	u32 pten = 1 << desc->bits;
+	u32 pteb, ptei, ptes;
+	u32 size = desc->size * pten;
+
+	pgd->refs[0]++;
+
+	pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
+	if (!pgt->pt[type]) {
+		it->lvl--;
+		nvkm_vmm_unref_pdes(it);
+		return false;
+	}
+
+	if (zero)
+		goto done;
+
+	pt = pgt->pt[type];
+
+	if (desc->type == LPT && pgt->refs[1]) {
+		/* SPT already exists covering the same range as this LPT,
+		 * which means we need to be careful that any LPTEs which
+		 * overlap valid SPTEs are unmapped as opposed to invalid
+		 * or sparse, which would prevent the MMU from looking at
+		 * the SPTEs on some GPUs.
+		 */
+		for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
+			bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+			for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
+				bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+				if (spte != next)
+					break;
+			}
+
+			if (!spte) {
+				if (pgt->sparse)
+					desc->func->sparse(vmm, pt, pteb, ptes);
+				else
+					desc->func->invalid(vmm, pt, pteb, ptes);
+				memset(&pgt->pte[pteb], 0x00, ptes);
+			} else {
+				desc->func->unmap(vmm, pt, pteb, ptes);
+				while (ptes--)
+					pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
+			}
+		}
+	} else {
+		if (pgt->sparse) {
+			nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
+			desc->func->sparse(vmm, pt, 0, pten);
+		} else {
+			desc->func->invalid(vmm, pt, 0, pten);
+		}
+	}
+
+done:
+	TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
+	it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
+	nvkm_vmm_flush_mark(it);
+	return true;
+}
+
+static bool
+nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+
+	pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
+	if (!pgt) {
+		if (!pgd->refs[0])
+			nvkm_vmm_unref_pdes(it);
+		return false;
+	}
+
+	pgd->pde[pdei] = pgt;
+	return true;
+}
+
+static inline u64
+nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+	      u64 addr, u64 size, const char *name, bool ref,
+	      bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+	      nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
+	      nvkm_vmm_pxe_func CLR_PTES)
+{
+	const struct nvkm_vmm_desc *desc = page->desc;
+	struct nvkm_vmm_iter it;
+	u64 bits = addr >> page->shift;
+
+	it.page = page;
+	it.desc = desc;
+	it.vmm = vmm;
+	it.cnt = size >> page->shift;
+	it.flush = NVKM_VMM_LEVELS_MAX;
+
+	/* Deconstruct address into PTE indices for each mapping level. */
+	for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
+		it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
+		bits >>= desc[it.lvl].bits;
+	}
+	it.max = --it.lvl;
+	it.pt[it.max] = vmm->pd;
+
+	it.lvl = 0;
+	TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
+	         addr, size, page->shift, it.cnt);
+	it.lvl = it.max;
+
+	/* Depth-first traversal of page tables. */
+	while (it.cnt) {
+		struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
+		const int type = desc->type == SPT;
+		const u32 pten = 1 << desc->bits;
+		const u32 ptei = it.pte[0];
+		const u32 ptes = min_t(u64, it.cnt, pten - ptei);
+
+		/* Walk down the tree, finding page tables for each level. */
+		for (; it.lvl; it.lvl--) {
+			const u32 pdei = it.pte[it.lvl];
+			struct nvkm_vmm_pt *pgd = pgt;
+
+			/* Software PT. */
+			if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
+				if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
+					goto fail;
+			}
+			it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
+
+			/* Hardware PT.
+			 *
+			 * This is a separate step from above due to GF100 and
+			 * newer having dual page tables at some levels, which
+			 * are refcounted independently.
+			 */
+			if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
+				if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
+					goto fail;
+			}
+		}
+
+		/* Handle PTE updates. */
+		if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+			struct nvkm_mmu_pt *pt = pgt->pt[type];
+			if (MAP_PTES || CLR_PTES) {
+				if (MAP_PTES)
+					MAP_PTES(vmm, pt, ptei, ptes, map);
+				else
+					CLR_PTES(vmm, pt, ptei, ptes);
+				nvkm_vmm_flush_mark(&it);
+			}
+		}
+
+		/* Walk back up the tree to the next position. */
+		it.pte[it.lvl] += ptes;
+		it.cnt -= ptes;
+		if (it.cnt) {
+			while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
+				it.pte[it.lvl++] = 0;
+				it.pte[it.lvl]++;
+			}
+		}
+	}
+
+	nvkm_vmm_flush(&it);
+	return ~0ULL;
+
+fail:
+	/* Reconstruct the failure address so the caller is able to
+	 * reverse any partially completed operations.
+	 */
+	addr = it.pte[it.max--];
+	do {
+		addr  = addr << desc[it.max].bits;
+		addr |= it.pte[it.max];
+	} while (it.max--);
+
+	return addr << page->shift;
+}
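
A worked example of the index deconstruction at the top of
nvkm_vmm_iter(); the level widths here are hypothetical, not any
specific GPU's layout:

	/* With desc bits per level {8, 8, 10} and page->shift = 14:
	 *
	 *   addr   = 0x0000000345678000
	 *   bits   = addr >> 14   = 0xd159e
	 *   pte[0] = bits & 0xff  = 0x9e    bits >>= 8  (now 0xd15)
	 *   pte[1] = bits & 0xff  = 0x15    bits >>= 8  (now 0xd)
	 *   pte[2] = bits & 0x3ff = 0xd     (top level: the PD index)
	 */
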
+
+static void
+nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			 u64 addr, u64 size)
+{
+	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+		      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
+		      page->desc->func->invalid ?
+		      page->desc->func->invalid : page->desc->func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			 u64 addr, u64 size)
+{
+	if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
+		u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
+					 true, nvkm_vmm_sparse_ref_ptes, NULL,
+					 NULL, page->desc->func->sparse);
+		if (fail != ~0ULL) {
+			if ((size = fail - addr))
+				nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
+			return -ENOMEM;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+	int m = 0, i;
+	u64 start = addr;
+	u64 block;
+
+	while (size) {
+		/* Limit maximum page size based on remaining size. */
+		while (size < (1ULL << page[m].shift))
+			m++;
+		i = m;
+
+		/* Find largest page size suitable for alignment. */
+		while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
+			i++;
+
+		/* Determine number of PTEs at this page size. */
+		if (i != m) {
+			/* Limited to alignment boundary of next page size. */
+			u64 next = 1ULL << page[i - 1].shift;
+			u64 part = ALIGN(addr, next) - addr;
+			if (size - part >= next)
+				block = (part >> page[i].shift) << page[i].shift;
+			else
+				block = (size >> page[i].shift) << page[i].shift;
+		} else {
+			block = (size >> page[i].shift) << page[i].shift;
+		}
+
+		/* Perform operation. */
+		if (ref) {
+			int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
+			if (ret) {
+				if ((size = addr - start))
+					nvkm_vmm_ptes_sparse(vmm, start, size, false);
+				return ret;
+			}
+		} else {
+			nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
+		}
+
+		size -= block;
+		addr += block;
+	}
+
+	return 0;
+}
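
A worked example of the page-size selection above, with a hypothetical
two-size layout (64KiB then 4KiB, largest first, as vmm->func->page is
ordered), addr = 0x11000 and size = 0x21000:

	/* pass 1: 64KiB fits the remaining size (m = 0) but addr is only
	 *         4KiB-aligned, so i = 1; part to the next 64KiB boundary
	 *         is 0xf000 and 0x21000 - 0xf000 >= 0x10000, so map a
	 *         0xf000 block of 4KiB pages.
	 * pass 2: addr = 0x20000 is 64KiB-aligned; map one 64KiB page.
	 * pass 3: addr = 0x30000, size = 0x2000 < 64KiB, so m = 1; map
	 *         the final 0x2000 as 4KiB pages.
	 */
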
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			u64 addr, u64 size, bool sparse)
+{
+	const struct nvkm_vmm_desc_func *func = page->desc->func;
+	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+		      false, nvkm_vmm_unref_ptes, NULL, NULL,
+		      sparse ? func->sparse : func->invalid ? func->invalid :
+							      func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		      u64 addr, u64 size, struct nvkm_vmm_map *map,
+		      nvkm_vmm_pte_func func)
+{
+	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+				 nvkm_vmm_ref_ptes, func, map, NULL);
+	if (fail != ~0ULL) {
+		if ((size = fail - addr))
+			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		    u64 addr, u64 size, bool sparse)
+{
+	const struct nvkm_vmm_desc_func *func = page->desc->func;
+	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+		      sparse ? func->sparse : func->invalid ? func->invalid :
+							      func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		  u64 addr, u64 size, struct nvkm_vmm_map *map,
+		  nvkm_vmm_pte_func func)
+{
+	nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+		      NULL, func, map, NULL);
+}
+
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		  u64 addr, u64 size)
+{
+	nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+		      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
+}
+
+static int
+nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		  u64 addr, u64 size)
+{
+	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+				 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+	if (fail != ~0ULL) {
+		if (fail != addr)
+			nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static inline struct nvkm_vma *
+nvkm_vma_new(u64 addr, u64 size)
+{
+	struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+	if (vma) {
+		vma->addr = addr;
+		vma->size = size;
+		vma->page = NVKM_VMA_PAGE_NONE;
+		vma->refd = NVKM_VMA_PAGE_NONE;
+	}
+	return vma;
+}
+
+struct nvkm_vma *
+nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
+{
+	struct nvkm_vma *new;
+
+	BUG_ON(vma->size == tail);
+
+	if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
+		return NULL;
+	vma->size -= tail;
+
+	new->mapref = vma->mapref;
+	new->sparse = vma->sparse;
+	new->page = vma->page;
+	new->refd = vma->refd;
+	new->used = vma->used;
+	new->part = vma->part;
+	new->user = vma->user;
+	new->busy = vma->busy;
+	list_add(&new->head, &vma->head);
+	return new;
+}
+
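+/* The free-space tree is ordered by size, then address, so that an
+ * in-order walk visits best-fit candidates from smallest block upwards.
+ */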
+static void
+nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	struct rb_node **ptr = &vmm->free.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+		parent = *ptr;
+		if (vma->size < this->size)
+			ptr = &parent->rb_left;
+		else
+		if (vma->size > this->size)
+			ptr = &parent->rb_right;
+		else
+		if (vma->addr < this->addr)
+			ptr = &parent->rb_left;
+		else
+		if (vma->addr > this->addr)
+			ptr = &parent->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&vma->tree, parent, ptr);
+	rb_insert_color(&vma->tree, &vmm->free);
+}
+
+void
+nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	struct rb_node **ptr = &vmm->root.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+		parent = *ptr;
+		if (vma->addr < this->addr)
+			ptr = &parent->rb_left;
+		else
+		if (vma->addr > this->addr)
+			ptr = &parent->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&vma->tree, parent, ptr);
+	rb_insert_color(&vma->tree, &vmm->root);
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
+{
+	struct rb_node *node = vmm->root.rb_node;
+	while (node) {
+		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+		if (addr < vma->addr)
+			node = node->rb_left;
+		else
+		if (addr >= vma->addr + vma->size)
+			node = node->rb_right;
+		else
+			return vma;
+	}
+	return NULL;
+}
+
+static void
+nvkm_vmm_dtor(struct nvkm_vmm *vmm)
+{
+	struct nvkm_vma *vma;
+	struct rb_node *node;
+
+	while ((node = rb_first(&vmm->root))) {
+		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+		nvkm_vmm_put(vmm, &vma);
+	}
+
+	if (vmm->bootstrapped) {
+		const struct nvkm_vmm_page *page = vmm->func->page;
+		const u64 limit = vmm->limit - vmm->start;
+
+		while (page[1].shift)
+			page++;
+
+		nvkm_mmu_ptc_dump(vmm->mmu);
+		nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
+	}
+
+	vma = list_first_entry(&vmm->list, typeof(*vma), head);
+	list_del(&vma->head);
+	kfree(vma);
+	WARN_ON(!list_empty(&vmm->list));
+
+	if (vmm->nullp) {
+		dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
+				  vmm->nullp, vmm->null);
+	}
+
+	if (vmm->pd) {
+		nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
+		nvkm_vmm_pt_del(&vmm->pd);
+	}
+}
+
+int
+nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+	      u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm *vmm)
+{
+	static struct lock_class_key _key;
+	const struct nvkm_vmm_page *page = func->page;
+	const struct nvkm_vmm_desc *desc;
+	struct nvkm_vma *vma;
+	int levels, bits = 0;
+
+	vmm->func = func;
+	vmm->mmu = mmu;
+	vmm->name = name;
+	vmm->debug = mmu->subdev.debug;
+	kref_init(&vmm->kref);
+
+	__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+
+	/* Locate the smallest page size supported by the backend; it will
+	 * have the deepest nesting of page tables.
+	 */
+	while (page[1].shift)
+		page++;
+
+	/* Locate the structure that describes the layout of the top-level
+	 * page table, and determine the number of valid bits in a virtual
+	 * address.
+	 */
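+	/* e.g. the gf100 "17_12" layout (SPT: 15 bits, PGD: 13 bits,
+	 * smallest page shift: 12) gives levels = 2 and bits = 40.
+	 */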
+	for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
+		bits += desc->bits;
+	bits += page->shift;
+	desc--;
+
+	if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
+		return -EINVAL;
+
+	vmm->start = addr;
+	vmm->limit = size ? (addr + size) : (1ULL << bits);
+	if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+		return -EINVAL;
+
+	/* Allocate top-level page table. */
+	vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
+	if (!vmm->pd)
+		return -ENOMEM;
+	vmm->pd->refs[0] = 1;
+	INIT_LIST_HEAD(&vmm->join);
+
+	/* ... and the GPU storage for it, except on Tesla-class GPUs that
+	 * have the PD embedded in the instance structure.
+	 */
+	if (desc->size) {
+		const u32 size = pd_header + desc->size * (1 << desc->bits);
+		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
+		if (!vmm->pd->pt[0])
+			return -ENOMEM;
+	}
+
+	/* Initialise address-space MM. */
+	INIT_LIST_HEAD(&vmm->list);
+	vmm->free = RB_ROOT;
+	vmm->root = RB_ROOT;
+
+	if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+		return -ENOMEM;
+
+	nvkm_vmm_free_insert(vmm, vma);
+	list_add(&vma->head, &vmm->list);
+	return 0;
+}
+
+int
+nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+	      u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
+{
+	if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
+}
+
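+/* Previous/next VMA in address order, or NULL at either end of the
+ * VMM's region list.  Note this relies on a 'vmm' variable in scope.
+ */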
+#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :              \
+	list_entry((root)->head.dir, struct nvkm_vma, head)
+
+void
+nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	struct nvkm_vma *next;
+
+	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+	nvkm_memory_unref(&vma->memory);
+
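+	/* Merge adjacent unmapped regions that were split off from the
+	 * same allocation (part == true), so that repeated map/unmap
+	 * cycles don't grow the region list without bound.
+	 */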
+	if (vma->part) {
+		struct nvkm_vma *prev = node(vma, prev);
+		if (!prev->memory) {
+			prev->size += vma->size;
+			rb_erase(&vma->tree, &vmm->root);
+			list_del(&vma->head);
+			kfree(vma);
+			vma = prev;
+		}
+	}
+
+	next = node(vma, next);
+	if (next && next->part) {
+		if (!next->memory) {
+			vma->size += next->size;
+			rb_erase(&next->tree, &vmm->root);
+			list_del(&next->head);
+			kfree(next);
+		}
+	}
+}
+
+void
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
+
+	if (vma->mapref) {
+		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+		vma->refd = NVKM_VMA_PAGE_NONE;
+	} else {
+		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+	}
+
+	nvkm_vmm_unmap_region(vmm, vma);
+}
+
+void
+nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	if (vma->memory) {
+		mutex_lock(&vmm->mutex);
+		nvkm_vmm_unmap_locked(vmm, vma);
+		mutex_unlock(&vmm->mutex);
+	}
+}
+
+static int
+nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		   void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+	switch (nvkm_memory_target(map->memory)) {
+	case NVKM_MEM_TARGET_VRAM:
+		if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
+			VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
+			return -EINVAL;
+		}
+		break;
+	case NVKM_MEM_TARGET_HOST:
+	case NVKM_MEM_TARGET_NCOH:
+		if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
+			VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
+			return -EINVAL;
+		}
+		break;
+	default:
+		WARN_ON(1);
+		return -ENOSYS;
+	}
+
+	if (!IS_ALIGNED(     vma->addr, 1ULL << map->page->shift) ||
+	    !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
+	    !IS_ALIGNED(   map->offset, 1ULL << map->page->shift) ||
+	    nvkm_memory_page(map->memory) < map->page->shift) {
+		VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
+		    vma->addr, (u64)vma->size, map->offset, map->page->shift,
+		    nvkm_memory_page(map->memory));
+		return -EINVAL;
+	}
+
+	return vmm->func->valid(vmm, argv, argc, map);
+}
+
+static int
+nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		    void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+	for (map->page = vmm->func->page; map->page->shift; map->page++) {
+		VMM_DEBUG(vmm, "trying %d", map->page->shift);
+		if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
+			return 0;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		    void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+	nvkm_vmm_pte_func func;
+	int ret;
+
+	/* Make sure we won't overrun the end of the memory object. */
+	if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
+		VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
+			  nvkm_memory_size(map->memory),
+			  map->offset, (u64)vma->size);
+		return -EINVAL;
+	}
+
+	/* Check remaining arguments for validity. */
+	if (vma->page == NVKM_VMA_PAGE_NONE &&
+	    vma->refd == NVKM_VMA_PAGE_NONE) {
+		/* Find the largest page size we can perform the mapping at. */
+		const u32 debug = vmm->debug;
+		vmm->debug = 0;
+		ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+		vmm->debug = debug;
+		if (ret) {
+			VMM_DEBUG(vmm, "invalid at any page size");
+			nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+			return -EINVAL;
+		}
+	} else {
+		/* Page size of the VMA is already pre-determined. */
+		if (vma->refd != NVKM_VMA_PAGE_NONE)
+			map->page = &vmm->func->page[vma->refd];
+		else
+			map->page = &vmm->func->page[vma->page];
+
+		ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
+		if (ret) {
+			VMM_DEBUG(vmm, "invalid %d", ret);
+			return ret;
+		}
+	}
+
+	/* Deal with the 'offset' argument, and fetch the backend function. */
+	map->off = map->offset;
+	if (map->mem) {
+		for (; map->off; map->mem = map->mem->next) {
+			u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
+			if (size > map->off)
+				break;
+			map->off -= size;
+		}
+		func = map->page->desc->func->mem;
+	} else
+	if (map->sgl) {
+		for (; map->off; map->sgl = sg_next(map->sgl)) {
+			u64 size = sg_dma_len(map->sgl);
+			if (size > map->off)
+				break;
+			map->off -= size;
+		}
+		func = map->page->desc->func->sgl;
+	} else {
+		map->dma += map->offset >> PAGE_SHIFT;
+		map->off  = map->offset & PAGE_MASK;
+		func = map->page->desc->func->dma;
+	}
+
+	/* Perform the map. */
+	if (vma->refd == NVKM_VMA_PAGE_NONE) {
+		ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
+		if (ret)
+			return ret;
+
+		vma->refd = map->page - vmm->func->page;
+	} else {
+		nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
+	}
+
+	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+	nvkm_memory_unref(&vma->memory);
+	vma->memory = nvkm_memory_ref(map->memory);
+	vma->tags = map->tags;
+	return 0;
+}
+
+int
+nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
+	     struct nvkm_vmm_map *map)
+{
+	int ret;
+	mutex_lock(&vmm->mutex);
+	ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+	vma->busy = false;
+	mutex_unlock(&vmm->mutex);
+	return ret;
+}
+
+static void
+nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	struct nvkm_vma *prev, *next;
+
+	if ((prev = node(vma, prev)) && !prev->used) {
+		rb_erase(&prev->tree, &vmm->free);
+		list_del(&prev->head);
+		vma->addr  = prev->addr;
+		vma->size += prev->size;
+		kfree(prev);
+	}
+
+	if ((next = node(vma, next)) && !next->used) {
+		rb_erase(&next->tree, &vmm->free);
+		list_del(&next->head);
+		vma->size += next->size;
+		kfree(next);
+	}
+
+	nvkm_vmm_free_insert(vmm, vma);
+}
+
+void
+nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+	struct nvkm_vma *next = vma;
+
+	BUG_ON(vma->part);
+
+	if (vma->mapref || !vma->sparse) {
+		do {
+			const bool map = next->memory != NULL;
+			const u8  refd = next->refd;
+			const u64 addr = next->addr;
+			u64 size = next->size;
+
+			/* Merge regions that are in the same state. */
+			while ((next = node(next, next)) && next->part &&
+			       (next->memory != NULL) == map &&
+			       (next->refd == refd))
+				size += next->size;
+
+			if (map) {
+				/* Region(s) are mapped, merge the unmap
+				 * and dereference into a single walk of
+				 * the page tree.
+				 */
+				nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
+							size, vma->sparse);
+			} else
+			if (refd != NVKM_VMA_PAGE_NONE) {
+				/* Drop allocation-time PTE references. */
+				nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+			}
+		} while (next && next->part);
+	}
+
+	/* Merge any mapped regions that were split from the initial
+	 * address-space allocation back into the allocated VMA, and
+	 * release memory/compression resources.
+	 */
+	next = vma;
+	do {
+		if (next->memory)
+			nvkm_vmm_unmap_region(vmm, next);
+	} while ((next = node(vma, next)) && next->part);
+
+	if (vma->sparse && !vma->mapref) {
+		/* Sparse region that was allocated with a fixed page size,
+		 * meaning all relevant PTEs were referenced once when the
+		 * region was allocated, and remained that way, regardless
+		 * of whether memory was mapped into it afterwards.
+		 *
+		 * The process of unmapping, unsparsing, and dereferencing
+		 * PTEs can be done in a single page tree walk.
+		 */
+		nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
+	} else
+	if (vma->sparse) {
+		/* Sparse region that wasn't allocated with a fixed page size,
+		 * PTE references were taken both at allocation time (to make
+		 * the GPU see the region as sparse), and when mapping memory
+		 * into the region.
+		 *
+		 * The latter was handled above, and the remaining references
+		 * are dealt with here.
+		 */
+		nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
+	}
+
+	/* Remove VMA from the list of allocated nodes. */
+	rb_erase(&vma->tree, &vmm->root);
+
+	/* Merge VMA back into the free list. */
+	vma->page = NVKM_VMA_PAGE_NONE;
+	vma->refd = NVKM_VMA_PAGE_NONE;
+	vma->used = false;
+	vma->user = false;
+	nvkm_vmm_put_region(vmm, vma);
+}
+
+void
+nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
+{
+	struct nvkm_vma *vma = *pvma;
+	if (vma) {
+		mutex_lock(&vmm->mutex);
+		nvkm_vmm_put_locked(vmm, vma);
+		mutex_unlock(&vmm->mutex);
+		*pvma = NULL;
+	}
+}
+
+int
+nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
+		    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
+{
+	const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
+	struct rb_node *node = NULL, *temp;
+	struct nvkm_vma *vma = NULL, *tmp;
+	u64 addr, tail;
+	int ret;
+
+	VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
+		       "shift: %d align: %d size: %016llx",
+		  getref, mapref, sparse, shift, align, size);
+
+	/* Zero-sized VMAs, and lazily-allocated sparse VMAs, make no sense. */
+	if (unlikely(!size || (!getref && !mapref && sparse))) {
+		VMM_DEBUG(vmm, "args %016llx %d %d %d",
+			  size, getref, mapref, sparse);
+		return -EINVAL;
+	}
+
+	/* Tesla-class GPUs can only select page size per-PDE, which means
+	 * we're required to know the mapping granularity up-front to find
+	 * a suitable region of address-space.
+	 *
+	 * The same goes if we're requesting up-front allocation of PTEs.
+	 */
+	if (unlikely((getref || vmm->func->page_block) && !shift)) {
+		VMM_DEBUG(vmm, "page size required: %d %016llx",
+			  getref, vmm->func->page_block);
+		return -EINVAL;
+	}
+
+	/* If a specific page size was requested, determine its index and
+	 * make sure the requested size is a multiple of the page size.
+	 */
+	if (shift) {
+		for (page = vmm->func->page; page->shift; page++) {
+			if (shift == page->shift)
+				break;
+		}
+
+		if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+			VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+			return -EINVAL;
+		}
+		align = max_t(u8, align, shift);
+	} else {
+		align = max_t(u8, align, 12);
+	}
+
+	/* Locate smallest block that can possibly satisfy the allocation. */
+	temp = vmm->free.rb_node;
+	while (temp) {
+		struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
+		if (this->size < size) {
+			temp = temp->rb_right;
+		} else {
+			node = temp;
+			temp = temp->rb_left;
+		}
+	}
+
+	if (unlikely(!node))
+		return -ENOSPC;
+
+	/* Take into account alignment restrictions, trying larger blocks
+	 * in turn until we find a suitable free block.
+	 */
+	do {
+		struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
+		struct nvkm_vma *prev = node(this, prev);
+		struct nvkm_vma *next = node(this, next);
+		const int p = page - vmm->func->page;
+
+		addr = this->addr;
+		if (vmm->func->page_block && prev && prev->page != p)
+			addr = roundup(addr, vmm->func->page_block);
+		addr = ALIGN(addr, 1ULL << align);
+
+		tail = this->addr + this->size;
+		if (vmm->func->page_block && next && next->page != p)
+			tail = rounddown(tail, vmm->func->page_block);
+
+		if (addr <= tail && tail - addr >= size) {
+			rb_erase(&this->tree, &vmm->free);
+			vma = this;
+			break;
+		}
+	} while ((node = rb_next(node)));
+
+	if (unlikely(!vma))
+		return -ENOSPC;
+
+	/* If the VMA we found isn't already exactly the requested size,
+	 * it needs to be split, and the remaining free blocks returned.
+	 */
+	if (addr != vma->addr) {
+		if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
+			nvkm_vmm_put_region(vmm, vma);
+			return -ENOMEM;
+		}
+		nvkm_vmm_free_insert(vmm, vma);
+		vma = tmp;
+	}
+
+	if (size != vma->size) {
+		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+			nvkm_vmm_put_region(vmm, vma);
+			return -ENOMEM;
+		}
+		nvkm_vmm_free_insert(vmm, tmp);
+	}
+
+	/* Pre-allocate page tables and/or set up sparse mappings. */
+	if (sparse && getref)
+		ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
+	else if (sparse)
+		ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
+	else if (getref)
+		ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
+	else
+		ret = 0;
+	if (ret) {
+		nvkm_vmm_put_region(vmm, vma);
+		return ret;
+	}
+
+	vma->mapref = mapref && !getref;
+	vma->sparse = sparse;
+	vma->page = page - vmm->func->page;
+	vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
+	vma->used = true;
+	nvkm_vmm_node_insert(vmm, vma);
+	*pvma = vma;
+	return 0;
+}
+
+int
+nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
+{
+	int ret;
+	mutex_lock(&vmm->mutex);
+	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
+	mutex_unlock(&vmm->mutex);
+	return ret;
+}
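+
+/* Typical client lifecycle (a rough sketch; error handling omitted):
+ *
+ *	struct nvkm_vma *vma;
+ *	struct nvkm_vmm_map map = { .memory = memory };
+ *	nvkm_vmm_get(vmm, 12, size, &vma);         reserve address-space
+ *	nvkm_vmm_map(vmm, vma, argv, argc, &map);  back it with memory
+ *	...
+ *	nvkm_vmm_unmap(vmm, vma);
+ *	nvkm_vmm_put(vmm, &vma);                   also NULLs the pointer
+ */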
+
+void
+nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	if (vmm->func->part && inst) {
+		mutex_lock(&vmm->mutex);
+		vmm->func->part(vmm, inst);
+		mutex_unlock(&vmm->mutex);
+	}
+}
+
+int
+nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	int ret = 0;
+	if (vmm->func->join) {
+		mutex_lock(&vmm->mutex);
+		ret = vmm->func->join(vmm, inst);
+		mutex_unlock(&vmm->mutex);
+	}
+	return ret;
+}
+
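+/* Pre-allocate page tables covering the whole address-space, and give
+ * the backend a chance to boot each PT's backing memory; used when
+ * bootstrapping BAR address-spaces that must work before the MMU is
+ * fully up.
+ */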
+static bool
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+	const struct nvkm_vmm_desc *desc = it->desc;
+	const int type = desc->type == SPT;
+	nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
+	return false;
+}
+
+int
+nvkm_vmm_boot(struct nvkm_vmm *vmm)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+	const u64 limit = vmm->limit - vmm->start;
+	int ret;
+
+	while (page[1].shift)
+		page++;
+
+	ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
+	if (ret)
+		return ret;
+
+	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+		      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
+	vmm->bootstrapped = true;
+	return 0;
+}
+
+static void
+nvkm_vmm_del(struct kref *kref)
+{
+	struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
+	nvkm_vmm_dtor(vmm);
+	kfree(vmm);
+}
+
+void
+nvkm_vmm_unref(struct nvkm_vmm **pvmm)
+{
+	struct nvkm_vmm *vmm = *pvmm;
+	if (vmm) {
+		kref_put(&vmm->kref, nvkm_vmm_del);
+		*pvmm = NULL;
+	}
+}
+
+struct nvkm_vmm *
+nvkm_vmm_ref(struct nvkm_vmm *vmm)
+{
+	if (vmm)
+		kref_get(&vmm->kref);
+	return vmm;
+}
+
+int
+nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
+	     u32 argc, struct lock_class_key *key, const char *name,
+	     struct nvkm_vmm **pvmm)
+{
+	struct nvkm_mmu *mmu = device->mmu;
+	struct nvkm_vmm *vmm = NULL;
+	int ret;
+	ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
+	if (ret)
+		nvkm_vmm_unref(&vmm);
+	*pvmm = vmm;
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
new file mode 100644
index 0000000000000..6d8f61ea467af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -0,0 +1,310 @@
+#ifndef __NVKM_VMM_H__
+#define __NVKM_VMM_H__
+#include "priv.h"
+#include <core/memory.h>
+enum nvkm_memory_target;
+
+struct nvkm_vmm_pt {
+	/* Some GPUs have a mapping level with dual page tables to
+	 * support both large and small pages in the same address-range.
+	 *
+	 * We track the state of both page tables in one place, which
+	 * is why there's multiple PT pointers/refcounts here.
+	 */
+	struct nvkm_mmu_pt *pt[2];
+	u32 refs[2];
+
+	/* Page size handled by this PT.
+	 *
+	 * The Tesla backend needs to know this when writing PDEs;
+	 * it's unnecessary otherwise.
+	 */
+	u8 page;
+
+	/* Entire page table sparse.
+	 *
+	 * Used to propagate sparseness to child page tables.
+	 */
+	bool sparse:1;
+
+	/* Tracking for page directories.
+	 *
+	 * The array is indexed by PDE, and will either point to the
+	 * child page table, or indicate the PDE is marked as sparse.
+	 */
+#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
+#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
+#define NVKM_VMM_PDE_SPARSE       ERR_PTR(-EBUSY)
+	struct nvkm_vmm_pt **pde;
+
+	/* Tracking for dual page tables.
+	 *
+	 * There's one entry for each LPTE, keeping track of whether
+	 * there are valid SPTEs in the same address-range.
+	 *
+	 * This information is used to manage LPTE state transitions.
+	 */
+#define NVKM_VMM_PTE_SPARSE 0x80
+#define NVKM_VMM_PTE_VALID  0x40
+#define NVKM_VMM_PTE_SPTES  0x3f
+	u8 pte[];
+};
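+
+/* Illustrative reading: a pte[] entry of (NVKM_VMM_PTE_SPARSE | 4)
+ * would mark an LPTE region that is sparse and currently contains
+ * four valid SPTEs.
+ */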
+
+typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
+				  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
+				  struct nvkm_vmm_pt *, u32 pdei);
+typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
+				  u32 ptei, u32 ptes, struct nvkm_vmm_map *);
+
+struct nvkm_vmm_desc_func {
+	nvkm_vmm_pxe_func invalid;
+	nvkm_vmm_pxe_func unmap;
+	nvkm_vmm_pxe_func sparse;
+
+	nvkm_vmm_pde_func pde;
+
+	nvkm_vmm_pte_func mem;
+	nvkm_vmm_pte_func dma;
+	nvkm_vmm_pte_func sgl;
+};
+
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
+void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
+void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+		       struct nvkm_vmm_map *);
+void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+		       struct nvkm_vmm_map *);
+void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+		       struct nvkm_vmm_map *);
+
+void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+
+struct nvkm_vmm_desc {
+	enum {
+		PGD,
+		PGT,
+		SPT,
+		LPT,
+	} type;
+	u8 bits;	/* VMA bits covered by PT. */
+	u8 size;	/* Bytes-per-PTE. */
+	u32 align;	/* PT address alignment. */
+	const struct nvkm_vmm_desc_func *func;
+};
+
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
+extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];
+
+struct nvkm_vmm_page {
+	u8 shift;
+	const struct nvkm_vmm_desc *desc;
+#define NVKM_VMM_PAGE_SPARSE                                               0x01
+#define NVKM_VMM_PAGE_VRAM                                                 0x02
+#define NVKM_VMM_PAGE_HOST                                                 0x04
+#define NVKM_VMM_PAGE_COMP                                                 0x08
+#define NVKM_VMM_PAGE_Sxxx                                (NVKM_VMM_PAGE_SPARSE)
+#define NVKM_VMM_PAGE_xVxx                                  (NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_SVxx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_xxHx                                  (NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SxHx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVHx             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SVHx             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVxC             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SVxC             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_xxHC             (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SxHC             (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
+	u8 type;
+};
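+
+/* In the NVKM_VMM_PAGE_* names above, each position encodes one
+ * capability: S = sparse, V = VRAM, H = host, C = compression, with
+ * 'x' marking an absent one (e.g. SVxC = sparse + VRAM + comp).
+ */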
+
+struct nvkm_vmm_func {
+	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
+	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
+
+	int (*aper)(enum nvkm_memory_target);
+	int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
+		     struct nvkm_vmm_map *);
+	void (*flush)(struct nvkm_vmm *, int depth);
+
+	u64 page_block;
+	const struct nvkm_vmm_page page[];
+};
+
+struct nvkm_vmm_join {
+	struct nvkm_memory *inst;
+	struct list_head head;
+};
+
+int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+		  const char *name, struct nvkm_vmm **);
+int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+		  const char *name, struct nvkm_vmm *);
+struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
+			bool sparse, u8 page, u8 align, u64 size,
+			struct nvkm_vma **pvma);
+void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
+
+struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
+void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
+
+int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
+		  u64, u64, void *, u32, struct lock_class_key *,
+		  const char *, struct nvkm_vmm **);
+int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+
+int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+		   struct nvkm_mmu *, u64, u64, void *, u32,
+		   struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int gf100_vmm_aper(enum nvkm_memory_target);
+int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gf100_vmm_flush_(struct nvkm_vmm *, int);
+void gf100_vmm_flush(struct nvkm_vmm *, int);
+
+int gk20a_vmm_aper(enum nvkm_memory_target);
+
+int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+		   struct nvkm_mmu *, u64, u64, void *, u32,
+		   struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
+int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gp100_vmm_flush(struct nvkm_vmm *, int);
+
+int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+			struct lock_class_key *, const char *,
+			struct nvkm_vmm **);
+int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+			struct lock_class_key *, const char *,
+			struct nvkm_vmm **);
+int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
+int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
+int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
+
+#define VMM_PRINT(l,v,p,f,a...) do {                                           \
+	struct nvkm_vmm *_vmm = (v);                                           \
+	if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) {               \
+		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",            \
+			     _vmm->name, ##a);                                 \
+	}                                                                      \
+} while(0)
+#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
+#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
+#define VMM_SPAM(v,f,a...)  VMM_PRINT(NV_DBG_SPAM , (v),  dbg, f, ##a)
+
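+/* Iterate over PTEN PTEs starting at PTEI, clamping each step to the
+ * remainder of the current memory segment (BASE/SIZE) and advancing
+ * to the NEXT segment once one is exhausted; FILL() writes the PTEs.
+ */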
+#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do {            \
+	nvkm_kmap((PT)->memory);                                               \
+	while (PTEN) {                                                         \
+		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift;           \
+		u64 _addr = ((BASE) + MAP->off);                               \
+                                                                               \
+		if (_ptes > PTEN) {                                            \
+			MAP->off += PTEN << MAP->page->shift;                  \
+			_ptes = PTEN;                                          \
+		} else {                                                       \
+			MAP->off = 0;                                          \
+			NEXT;                                                  \
+		}                                                              \
+                                                                               \
+		VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes);      \
+                                                                               \
+		FILL(VMM, PT, PTEI, _ptes, MAP, _addr);                        \
+		PTEI += _ptes;                                                 \
+		PTEN -= _ptes;                                                 \
+	}                                                                      \
+	nvkm_done((PT)->memory);                                               \
+} while(0)
+
+#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
+	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
+		     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT),             \
+		     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT),             \
+		     (MAP->mem = MAP->mem->next))
+#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
+	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
+		     *MAP->dma, PAGE_SIZE, MAP->dma++)
+#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
+	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
+		     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl),           \
+		     (MAP->sgl = sg_next(MAP->sgl)))
+
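+/* Wrappers around nvkm_wo##b() (write one PTE) and nvkm_fo##b() (fill
+ * many), which also emit SPAM-level tracing for each update.
+ */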
+#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
+#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
+#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do {                                     \
+	const u32 _pteo = (o); u##b _data = (d);                               \
+	VMM_SPAM((v), "   %010llx "f, (m)->addr + _pteo, _data, ##a);          \
+	VMM_##fn((m), (m)->base + _pteo, _data, (c), b);                       \
+} while(0)
+
+#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 32, WO, "%08x")
+#define VMM_FO032(m,v,o,d,c)                                                   \
+	VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))
+
+#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 64, WO, "%016llx")
+#define VMM_FO064(m,v,o,d,c)                                                   \
+	VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
+
+#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do {                                   \
+	u32 _pteo = (o), _ptes = (c);                                          \
+	const u64 _addr = (m)->addr + _pteo;                                   \
+	VMM_SPAM((v), "   %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a);   \
+	while (_ptes--) {                                                      \
+		nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo));           \
+		nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi));           \
+		_pteo += 0x10;                                                 \
+	}                                                                      \
+} while(0)
+
+#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
+#define VMM_FO128(m,v,o,lo,hi,c) do {                                          \
+	nvkm_kmap((m)->memory);                                                \
+	VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c));                    \
+	nvkm_done((m)->memory);                                                \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
new file mode 100644
index 0000000000000..faf5a7e9265ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/timer.h>
+
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
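+/* Write a run of PTEs; the physical address is stored shifted right
+ * by 8, with the remaining attribute bits arriving pre-packed in
+ * map->type (and compression-tag state in map->ctag/map->next) from
+ * gf100_vmm_valid().
+ */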
+static inline void
+gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 base = (addr >> 8) | map->type;
+	u64 data = base;
+
+	if (map->ctag && !(map->next & (1ULL << 44))) {
+		while (ptes--) {
+			data = base | ((map->ctag >> 1) << 44);
+			if (!(map->ctag++ & 1))
+				data |= BIT_ULL(60);
+
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+			base += map->next;
+		}
+	} else {
+		map->type += ptes * map->ctag;
+
+		while (ptes--) {
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+			data += map->next;
+		}
+	}
+}
+
+void
+gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	if (map->page->shift == PAGE_SHIFT) {
+		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+		nvkm_kmap(pt->memory);
+		while (ptes--) {
+			const u64 data = (*map->dma++ >> 8) | map->type;
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+			map->type += map->ctag;
+		}
+		nvkm_done(pt->memory);
+		return;
+	}
+
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgt = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.mem = gf100_vmm_pgt_mem,
+	.dma = gf100_vmm_pgt_dma,
+	.sgl = gf100_vmm_pgt_sgl,
+};
+
+void
+gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	struct nvkm_mmu_pt *pd = pgd->pt[0];
+	struct nvkm_mmu_pt *pt;
+	u64 data = 0;
+
+	if ((pt = pgt->pt[0])) {
+		switch (nvkm_memory_target(pt->memory)) {
+		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
+		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
+			data |= BIT_ULL(35); /* VOL */
+			break;
+		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
+		default:
+			WARN_ON(1);
+			return;
+		}
+		data |= pt->addr >> 8;
+	}
+
+	if ((pt = pgt->pt[1])) {
+		switch (nvkm_memory_target(pt->memory)) {
+		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
+		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
+			data |= BIT_ULL(34); /* VOL */
+			break;
+		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
+		default:
+			WARN_ON(1);
+			return;
+		}
+		data |= pt->addr << 24;
+	}
+
+	nvkm_kmap(pd->memory);
+	VMM_WO064(pd, vmm, pdei * 8, data);
+	nvkm_done(pd->memory);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgd = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.pde = gf100_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_12[] = {
+	{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_17[] = {
+	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_12[] = {
+	{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_16[] = {
+	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+void
+gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
+{
+	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 type = depth << 24;
+
+	type = 0x00000001; /* PAGE_ALL */
+	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+		type |= 0x00000004; /* HUB_ONLY */
+
+	mutex_lock(&subdev->mutex);
+	/* Looks like this may be a "free flush slots" counter; the faster
+	 * you write to 0x100cbc, the more it decreases.
+	 */
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+			break;
+	);
+
+	nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
+	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
+
+	/* Wait for flush to be queued? */
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
+	mutex_unlock(&subdev->mutex);
+}
+
+void
+gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+	gf100_vmm_flush_(vmm, 0);
+}
+
+int
+gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+		struct nvkm_vmm_map *map)
+{
+	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+	const struct nvkm_vmm_page *page = map->page;
+	const bool gm20x = page->desc->func->sparse != NULL;
+	union {
+		struct gf100_vmm_map_vn vn;
+		struct gf100_vmm_map_v0 v0;
+	} *args = argv;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	struct nvkm_memory *memory = map->memory;
+	u8  kind, priv, ro, vol;
+	int kindn, aper, ret = -ENOSYS;
+	const u8 *kindm;
+
+	map->next = (1 << page->shift) >> 8;
+	map->type = map->ctag = 0;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		vol  = !!args->v0.vol;
+		ro   = !!args->v0.ro;
+		priv = !!args->v0.priv;
+		kind =   args->v0.kind;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		vol  = target == NVKM_MEM_TARGET_HOST;
+		ro   = 0;
+		priv = 0;
+		kind = 0x00;
+	} else {
+		VMM_DEBUG(vmm, "args");
+		return ret;
+	}
+
+	aper = vmm->func->aper(target);
+	if (WARN_ON(aper < 0))
+		return aper;
+
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+	if (kind >= kindn || kindm[kind] == 0xff) {
+		VMM_DEBUG(vmm, "kind %02x", kind);
+		return -EINVAL;
+	}
+
+	if (kindm[kind] != kind) {
+		u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
+		u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
+		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+			return -EINVAL;
+		}
+
+		ret = nvkm_memory_tags_get(memory, device, tags,
+					   nvkm_ltc_tags_clear,
+					   &map->tags);
+		if (ret) {
+			VMM_DEBUG(vmm, "comp %d", ret);
+			return ret;
+		}
+
+		if (map->tags->mn) {
+			u64 tags = map->tags->mn->offset + (map->offset >> 17);
+			if (page->shift == 17 || !gm20x) {
+				map->type |= tags << 44;
+				map->ctag |= 1ULL << 44;
+				map->next |= 1ULL << 44;
+			} else {
+				map->ctag |= tags << 1 | 1;
+			}
+		} else {
+			kind = kindm[kind];
+		}
+	}
+
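+	/* Build the PTE template: bit 0 = valid, bit 1 = privileged,
+	 * bit 2 = read-only, bit 32 = volatile, bits 33:34 = aperture,
+	 * bits 36+ = storage kind.
+	 */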
+	map->type |= BIT(0);
+	map->type |= (u64)priv << 1;
+	map->type |= (u64)  ro << 2;
+	map->type |= (u64) vol << 32;
+	map->type |= (u64)aper << 33;
+	map->type |= (u64)kind << 36;
+	return 0;
+}
+
+int
+gf100_vmm_aper(enum nvkm_memory_target target)
+{
+	switch (target) {
+	case NVKM_MEM_TARGET_VRAM: return 0;
+	case NVKM_MEM_TARGET_HOST: return 2;
+	case NVKM_MEM_TARGET_NCOH: return 3;
+	default:
+		return -EINVAL;
+	}
+}
+
+void
+gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	nvkm_fo64(inst, 0x0200, 0x00000000, 2);
+}
+
+int
+gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+
+	switch (nvkm_memory_target(pd->memory)) {
+	case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
+	case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
+		base |= BIT_ULL(2) /* VOL. */;
+		break;
+	case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	base |= pd->addr;
+
+	nvkm_kmap(inst);
+	nvkm_wo64(inst, 0x0200, base);
+	nvkm_wo64(inst, 0x0208, vmm->limit - 1);
+	nvkm_done(inst);
+	return 0;
+}
+
+int
+gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	return gf100_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gf100_vmm_17 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+		{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+		{}
+	}
+};
+
+static const struct nvkm_vmm_func
+gf100_vmm_16 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+		{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+		{}
+	}
+};
+
+int
+gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
+	       const struct nvkm_vmm_func *func_17,
+	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	       struct lock_class_key *key, const char *name,
+	       struct nvkm_vmm **pvmm)
+{
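+	/* The big-page size (64KiB or 128KiB) is a global setting on
+	 * these GPUs; select the VMM layout matching the FB's choice.
+	 */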
+	switch (mmu->subdev.device->fb->page) {
+	case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
+				      argv, argc, key, name, pvmm);
+	case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
+				      argv, argc, key, name, pvmm);
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
+int
+gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
new file mode 100644
index 0000000000000..0ebb7bccfcd22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+void
+gk104_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gk104_vmm_lpt = {
+	.invalid = gk104_vmm_lpt_invalid,
+	.unmap = gf100_vmm_pgt_unmap,
+	.mem = gf100_vmm_pgt_mem,
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_12[] = {
+	{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_17[] = {
+	{ LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_12[] = {
+	{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_16[] = {
+	{ LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+	{}
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_17 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+		{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+		{}
+	}
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_16 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+		{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+		{}
+	}
+};
+
+int
+gk104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
new file mode 100644
index 0000000000000..8086994a04461
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <core/memory.h>
+
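+/* gk20a is an integrated GPU with no VRAM of its own, so non-coherent
+ * system memory is the only valid target aperture.
+ */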
+int
+gk20a_vmm_aper(enum nvkm_memory_target target)
+{
+	switch (target) {
+	case NVKM_MEM_TARGET_NCOH: return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct nvkm_vmm_func
+gk20a_vmm_17 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
+		{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
+		{}
+	}
+};
+
+static const struct nvkm_vmm_func
+gk20a_vmm_16 = {
+	.join = gf100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
+		{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
+		{}
+	}
+};
+
+int
+gk20a_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
new file mode 100644
index 0000000000000..a1676a4644fe2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/ifb00d.h>
+#include <nvif/unpack.h>
+
+static void
+gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_spt = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgt_sparse,
+	.mem = gf100_vmm_pgt_mem,
+	.dma = gf100_vmm_pgt_dma,
+	.sgl = gf100_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_lpt = {
+	.invalid = gk104_vmm_lpt_invalid,
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgt_sparse,
+	.mem = gf100_vmm_pgt_mem,
+};
+
+static void
+gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+	VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_pgd = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgd_sparse,
+	.pde = gf100_vmm_pgd_pde,
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_12[] = {
+	{ SPT, 15, 8, 0x1000, &gm200_vmm_spt },
+	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_17[] = {
+	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+	{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_12[] = {
+	{ SPT, 14, 8, 0x1000, &gm200_vmm_spt },
+	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_16[] = {
+	{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+	{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+	{}
+};
+
+int
+gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
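+	/* Bit 11 of the instance-block PD pointer selects the 64KiB
+	 * big-page size (128KiB otherwise).
+	 */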
+	if (vmm->func->page[1].shift == 16)
+		base |= BIT_ULL(11);
+	return gf100_vmm_join_(vmm, inst, base);
+}
+
+int
+gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	return gm200_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gm200_vmm_17 = {
+	.join = gm200_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+static const struct nvkm_vmm_func
+gm200_vmm_16 = {
+	.join = gm200_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+int
+gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
+	       const struct nvkm_vmm_func *func_17,
+	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	       struct lock_class_key *key, const char *name,
+	       struct nvkm_vmm **pvmm)
+{
+	const struct nvkm_vmm_func *func;
+	union {
+		struct gm200_vmm_vn vn;
+		struct gm200_vmm_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+
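+	/* The v0 args let a client choose the big-page size: 16 selects
+	 * 64KiB and 17 selects 128KiB big pages; clients passing no
+	 * arguments get the 128KiB layout.
+	 */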
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		switch (args->v0.bigpage) {
+		case 16: func = func_16; break;
+		case 17: func = func_17; break;
+		default:
+			return -EINVAL;
+		}
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		func = func_17;
+	} else
+		return ret;
+
+	return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
+}
+
+int
+gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
+
+int
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+		    void *argv, u32 argc, struct lock_class_key *key,
+		    const char *name, struct nvkm_vmm **pvmm)
+{
+	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
new file mode 100644
index 0000000000000..64d4b6cff8dd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
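+/* GM20B is the Tegra variant of GM200: it reuses the GM200 descriptors,
+ * but maps everything through the host aperture (gk20a_vmm_aper), since
+ * the GPU shares system memory.
+ */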
+static const struct nvkm_vmm_func
+gm20b_vmm_17 = {
+	.join = gm200_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gk20a_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
+		{ 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SxHx },
+		{}
+	}
+};
+
+static const struct nvkm_vmm_func
+gm20b_vmm_16 = {
+	.join = gm200_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gk20a_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
+	.page = {
+		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
+		{ 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SxHx },
+		{}
+	}
+};
+
+int
+gm20b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
+
+int
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+		    void *argv, u32 argc, struct lock_class_key *key,
+		    const char *name, struct nvkm_vmm **pvmm)
+{
+	return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+			      size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
new file mode 100644
index 0000000000000..059fafe0e771a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
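+/* Pascal PTEs store the physical address shifted right by 4 bits; the
+ * attribute bits assembled by gp100_vmm_valid() arrive in map->type, and
+ * map->ctag advances the comptag index from PTE to PTE when compression
+ * is enabled.
+ */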
+static inline void
+gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 data = (addr >> 4) | map->type;
+
+	map->type += ptes * map->ctag;
+
+	while (ptes--) {
+		VMM_WO064(pt, vmm, ptei++ * 8, data);
+		data += map->next;
+	}
+}
+
+static void
+gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	if (map->page->shift == PAGE_SHIFT) {
+		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+		nvkm_kmap(pt->memory);
+		while (ptes--) {
+			const u64 data = (*map->dma++ >> 4) | map->type;
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+			map->type += map->ctag;
+		}
+		nvkm_done(pt->memory);
+		return;
+	}
+
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_spt = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gp100_vmm_pgt_sparse,
+	.mem = gp100_vmm_pgt_mem,
+	.dma = gp100_vmm_pgt_dma,
+	.sgl = gp100_vmm_pgt_sgl,
+};
+
+static void
+gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_lpt = {
+	.invalid = gp100_vmm_lpt_invalid,
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gp100_vmm_pgt_sparse,
+	.mem = gp100_vmm_pgt_mem,
+};
+
+static inline void
+gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 data = (addr >> 4) | map->type;
+
+	map->type += ptes * map->ctag;
+
+	while (ptes--) {
+		VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
+		data += map->next;
+	}
+}
+
+static void
+gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
+}
+
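+/* Encode a page table's target aperture into PDE bits 2:1 (host memory
+ * is additionally marked volatile), along with its address, which is
+ * shifted right by 4 bits as with PTEs.
+ */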
+static inline bool
+gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+	switch (nvkm_memory_target(pt->memory)) {
+	case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
+	case NVKM_MEM_TARGET_HOST:
+		*data |= 2ULL << 1;
+		*data |= BIT_ULL(3); /* VOL. */
+		break;
+	case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+	*data |= pt->addr >> 4;
+	return true;
+}
+
+static void
+gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	struct nvkm_mmu_pt *pd = pgd->pt[0];
+	u64 data[2] = {};
+
+	if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
+		return;
+	if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
+		return;
+
+	nvkm_kmap(pd->memory);
+	VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
+	nvkm_done(pd->memory);
+}
+
+static void
+gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+	VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
+}
+
+static void
+gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+		    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd0 = {
+	.unmap = gp100_vmm_pd0_unmap,
+	.sparse = gp100_vmm_pd0_sparse,
+	.pde = gp100_vmm_pd0_pde,
+	.mem = gp100_vmm_pd0_mem,
+};
+
+static void
+gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+	struct nvkm_mmu_pt *pd = pgd->pt[0];
+	u64 data = 0;
+
+	if (!gp100_vmm_pde(pgt->pt[0], &data))
+		return;
+
+	nvkm_kmap(pd->memory);
+	VMM_WO064(pd, vmm, pdei * 8, data);
+	nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd1 = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gp100_vmm_pgt_sparse,
+	.pde = gp100_vmm_pd1_pde,
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_16[] = {
+	{ LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
+	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{}
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_12[] = {
+	{ SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
+	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
+	{}
+};
+
+int
+gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+		struct nvkm_vmm_map *map)
+{
+	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+	const struct nvkm_vmm_page *page = map->page;
+	union {
+		struct gp100_vmm_map_vn vn;
+		struct gp100_vmm_map_v0 v0;
+	} *args = argv;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	struct nvkm_memory *memory = map->memory;
+	u8  kind, priv, ro, vol;
+	int kindn, aper, ret = -ENOSYS;
+	const u8 *kindm;
+
+	map->next = (1ULL << page->shift) >> 4;
+	map->type = 0;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		vol  = !!args->v0.vol;
+		ro   = !!args->v0.ro;
+		priv = !!args->v0.priv;
+		kind =   args->v0.kind;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		vol  = target == NVKM_MEM_TARGET_HOST;
+		ro   = 0;
+		priv = 0;
+		kind = 0x00;
+	} else {
+		VMM_DEBUG(vmm, "args");
+		return ret;
+	}
+
+	aper = vmm->func->aper(target);
+	if (WARN_ON(aper < 0))
+		return aper;
+
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+	if (kind >= kindn || kindm[kind] == 0xff) {
+		VMM_DEBUG(vmm, "kind %02x", kind);
+		return -EINVAL;
+	}
+
+	if (kindm[kind] != kind) {
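+		/* Kinds that alias a different storage kind are compressed:
+		 * try to allocate one comptag per 64KiB, falling back to
+		 * the uncompressed kind when no tags are available.
+		 */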
+		u64 tags = nvkm_memory_size(memory) >> 16;
+		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+			return -EINVAL;
+		}
+
+		ret = nvkm_memory_tags_get(memory, device, tags,
+					   nvkm_ltc_tags_clear,
+					   &map->tags);
+		if (ret) {
+			VMM_DEBUG(vmm, "comp %d", ret);
+			return ret;
+		}
+
+		if (map->tags->mn) {
+			tags = map->tags->mn->offset + (map->offset >> 16);
+			map->ctag |= ((1ULL << page->shift) >> 16) << 36;
+			map->type |= tags << 36;
+			map->next |= map->ctag;
+		} else {
+			kind = kindm[kind];
+		}
+	}
+
+	map->type |= BIT(0);
+	map->type |= (u64)aper << 1;
+	map->type |= (u64) vol << 3;
+	map->type |= (u64)priv << 5;
+	map->type |= (u64)  ro << 6;
+	map->type |= (u64)kind << 56;
+	return 0;
+}
+
+void
+gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
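+	/* Translate depth (levels from the top of the 5-level page tree)
+	 * into the hardware's CACHE_LEVEL invalidate field.
+	 */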
+	gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
+}
+
+int
+gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+	return gf100_vmm_join_(vmm, inst, base);
+}
+
+static const struct nvkm_vmm_func
+gp100_vmm = {
+	.join = gp100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gp100_vmm_valid,
+	.flush = gp100_vmm_flush,
+	.page = {
+		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
new file mode 100644
index 0000000000000..3dcc6bddb32f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
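+/* GP10B is the Tegra variant of GP100: same descriptors, but all page
+ * types go through the host aperture (gk20a_vmm_aper).
+ */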
+static const struct nvkm_vmm_func
+gp10b_vmm = {
+	.join = gp100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gk20a_vmm_aper,
+	.valid = gp100_vmm_valid,
+	.flush = gp100_vmm_flush,
+	.page = {
+		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
+		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
+		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
+		{}
+	}
+};
+
+int
+gp10b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&gp10b_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
new file mode 100644
index 0000000000000..0cab1ffc9f64b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/if000d.h>
+#include <nvif/unpack.h>
+
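+/* The NV04 page table starts with a two-word header (see nv04_vmm_new()),
+ * so PTE n lives at byte offset 8 + n * 4.
+ */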
+static inline void
+nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u32 data = addr | 0x00000003; /* PRESENT, RW. */
+	while (ptes--) {
+		VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
+		data += 0x00001000;
+	}
+}
+
+static void
+nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+}
+
+static void
+nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+	nvkm_kmap(pt->memory);
+	while (ptes--)
+		VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
+	nvkm_done(pt->memory);
+#else
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv04_vmm_desc_pgt = {
+	.unmap = nv04_vmm_pgt_unmap,
+	.dma = nv04_vmm_pgt_dma,
+	.sgl = nv04_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv04_vmm_desc_12[] = {
+	{ PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
+	{}
+};
+
+int
+nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+	       struct nvkm_vmm_map *map)
+{
+	union {
+		struct nv04_vmm_map_vn vn;
+	} *args = argv;
+	int ret = -ENOSYS;
+	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+		VMM_DEBUG(vmm, "args");
+	return ret;
+}
+
+static const struct nvkm_vmm_func
+nv04_vmm = {
+	.valid = nv04_vmm_valid,
+	.page = {
+		{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+		{}
+	}
+};
+
+int
+nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+	      u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	union {
+		struct nv04_vmm_vn vn;
+	} *args = argv;
+	int ret;
+
+	ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
+	if (ret)
+		return ret;
+
+	return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
+}
+
+int
+nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	     struct lock_class_key *key, const char *name,
+	     struct nvkm_vmm **pvmm)
+{
+	struct nvkm_memory *mem;
+	struct nvkm_vmm *vmm;
+	int ret;
+
+	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
+			    argv, argc, key, name, &vmm);
+	*pvmm = vmm;
+	if (ret)
+		return ret;
+
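+	/* Write the page table's two-word header (flags, then address
+	 * limit); the PTEs themselves follow at offset 8.
+	 */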
+	mem = vmm->pd->pt[0]->memory;
+	nvkm_kmap(mem);
+	nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nvkm_wo32(mem, 0x00004, vmm->limit - 1);
+	nvkm_done(mem);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
new file mode 100644
index 0000000000000..b595f130e5738
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
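+/* NV41 PTEs hold the bus address shifted right by 7 bits, so consecutive
+ * 4KiB pages differ by 0x20.
+ */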
+static void
+nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u32 data = (addr >> 7) | 0x00000001; /* VALID. */
+	while (ptes--) {
+		VMM_WO032(pt, vmm, ptei++ * 4, data);
+		data += 0x00000020;
+	}
+}
+
+static void
+nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+}
+
+static void
+nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		const u32 data = (*map->dma++ >> 7) | 0x00000001;
+		VMM_WO032(pt, vmm, ptei++ * 4, data);
+	}
+	nvkm_done(pt->memory);
+#else
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv41_vmm_desc_pgt = {
+	.unmap = nv41_vmm_pgt_unmap,
+	.dma = nv41_vmm_pgt_dma,
+	.sgl = nv41_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv41_vmm_desc_12[] = {
+	{ PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
+	{}
+};
+
+static void
+nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+	struct nvkm_device *device = subdev->device;
+
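+	/* Trigger the flush, wait for the engine to acknowledge it, then
+	 * clear the trigger again.
+	 */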
+	mutex_lock(&subdev->mutex);
+	nvkm_wr32(device, 0x100810, 0x00000022);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100810) & 0x00000020)
+			break;
+	);
+	nvkm_wr32(device, 0x100810, 0x00000000);
+	mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+nv41_vmm = {
+	.valid = nv04_vmm_valid,
+	.flush = nv41_vmm_flush,
+	.page = {
+		{ 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+		{}
+	}
+};
+
+int
+nv41_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	     struct lock_class_key *key, const char *name,
+	     struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&nv41_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
new file mode 100644
index 0000000000000..b834e43523343
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
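+/* NV44 packs four 27-bit page-frame numbers into each 16-byte group of
+ * PTE words.  Updating fewer than four PTEs therefore means a
+ * read-modify-write of the whole group; a NULL list points the PTEs at
+ * the dummy page allocated in nv44_vmm_new().
+ */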
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  dma_addr_t *list, u32 ptei, u32 ptes)
+{
+	u32 pteo = (ptei << 2) & ~0x0000000f;
+	u32 tmp[4];
+
+	tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+	tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+	tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+	tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+	while (ptes--) {
+		u32 addr = (list ? *list++ : vmm->null) >> 12;
+		switch (ptei++ & 0x3) {
+		case 0:
+			tmp[0] &= ~0x07ffffff;
+			tmp[0] |= addr;
+			break;
+		case 1:
+			tmp[0] &= ~0xf8000000;
+			tmp[0] |= addr << 27;
+			tmp[1] &= ~0x003fffff;
+			tmp[1] |= addr >> 5;
+			break;
+		case 2:
+			tmp[1] &= ~0xffc00000;
+			tmp[1] |= addr << 22;
+			tmp[2] &= ~0x0001ffff;
+			tmp[2] |= addr >> 10;
+			break;
+		case 3:
+			tmp[2] &= ~0xfffe0000;
+			tmp[2] |= addr << 17;
+			tmp[3] &= ~0x00000fff;
+			tmp[3] |= addr >> 15;
+			break;
+		}
+	}
+
+	VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+	VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+	VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+	VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	dma_addr_t tmp[4];
+	u32 i;
+
+	if (ptei & 3) {
+		const u32 pten = min(ptes, 4 - (ptei & 3));
+		for (i = 0; i < pten; i++, addr += 0x1000)
+			tmp[i] = addr;
+		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+		ptei += pten;
+		ptes -= pten;
+	}
+
+	while (ptes >= 4) {
+		for (i = 0; i < 4; i++, addr += 0x1000)
+			tmp[i] = addr >> 12;
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+		ptes -= 4;
+	}
+
+	if (ptes) {
+		for (i = 0; i < ptes; i++, addr += 0x1000)
+			tmp[i] = addr;
+		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+	}
+}
+
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+	nvkm_kmap(pt->memory);
+	if (ptei & 3) {
+		const u32 pten = min(ptes, 4 - (ptei & 3));
+		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+		ptei += pten;
+		ptes -= pten;
+		map->dma += pten;
+	}
+
+	while (ptes >= 4) {
+		u32 tmp[4], i;
+		for (i = 0; i < 4; i++)
+			tmp[i] = *map->dma++ >> 12;
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+		ptes -= 4;
+	}
+
+	if (ptes) {
+		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+		map->dma += ptes;
+	}
+	nvkm_done(pt->memory);
+#else
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	nvkm_kmap(pt->memory);
+	if (ptei & 3) {
+		const u32 pten = min(ptes, 4 - (ptei & 3));
+		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+		ptei += pten;
+		ptes -= pten;
+	}
+
+	while (ptes > 4) {
+		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+		ptes -= 4;
+	}
+
+	if (ptes)
+		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+	nvkm_done(pt->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+nv44_vmm_desc_pgt = {
+	.unmap = nv44_vmm_pgt_unmap,
+	.dma = nv44_vmm_pgt_dma,
+	.sgl = nv44_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv44_vmm_desc_12[] = {
+	{ PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
+	{}
+};
+
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+	nvkm_wr32(device, 0x100808, 0x00000020);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100808) & 0x00000001)
+			break;
+	);
+	nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
+static const struct nvkm_vmm_func
+nv44_vmm = {
+	.valid = nv04_vmm_valid,
+	.flush = nv44_vmm_flush,
+	.page = {
+		{ 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+		{}
+	}
+};
+
+int
+nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	     struct lock_class_key *key, const char *name,
+	     struct nvkm_vmm **pvmm)
+{
+	struct nvkm_subdev *subdev = &mmu->subdev;
+	struct nvkm_vmm *vmm;
+	int ret;
+
+	ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size,
+			    argv, argc, key, name, &vmm);
+	*pvmm = vmm;
+	if (ret)
+		return ret;
+
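+	/* Unmapped PTEs are pointed at this dummy page rather than left
+	 * invalid (see nv44_vmm_pgt_fill()).
+	 */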
+	vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
+					&vmm->null, GFP_KERNEL);
+	if (!vmm->nullp) {
+		nvkm_warn(subdev, "unable to allocate dummy pages\n");
+		vmm->null = 0;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
new file mode 100644
index 0000000000000..863a2edd98616
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
+
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
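+/* NV50 PTEs can advertise physically-contiguous runs: pick the largest
+ * naturally-aligned power-of-two block (up to 128 PTEs) and encode its
+ * log2 size into bits 9:7 of every PTE within it.
+ */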
+static inline void
+nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+	u64 next = addr | map->type, data;
+	u32 pten;
+	int log2blk;
+
+	map->type += ptes * map->ctag;
+
+	while (ptes) {
+		for (log2blk = 7; log2blk >= 0; log2blk--) {
+			pten = 1 << log2blk;
+			if (ptes >= pten && IS_ALIGNED(ptei, pten))
+				break;
+		}
+
+		data  = next | (log2blk << 7);
+		next += pten * map->next;
+		ptes -= pten;
+
+		while (pten--)
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+	}
+}
+
+static void
+nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	if (map->page->shift == PAGE_SHIFT) {
+		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+		nvkm_kmap(pt->memory);
+		while (ptes--) {
+			const u64 data = *map->dma++ | map->type;
+			VMM_WO064(pt, vmm, ptei++ * 8, data);
+			map->type += map->ctag;
+		}
+		nvkm_done(pt->memory);
+		return;
+	}
+
+	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgt = {
+	.unmap = nv50_vmm_pgt_unmap,
+	.mem = nv50_vmm_pgt_mem,
+	.dma = nv50_vmm_pgt_dma,
+	.sgl = nv50_vmm_pgt_sgl,
+};
+
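+/* Build a PDE: small-page tables may be cut down from the full 1MiB
+ * (their size is encoded in bits 6:5), and the table's target aperture
+ * is stored alongside its address.
+ */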
+static bool
+nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
+{
+	struct nvkm_mmu_pt *pt;
+	u64 data = 0xdeadcafe00000000ULL;
+	if (pgt && (pt = pgt->pt[0])) {
+		switch (pgt->page) {
+		case 16: data = 0x00000001; break;
+		case 12:
+			data = 0x00000003;
+			switch (nvkm_memory_size(pt->memory)) {
+			case 0x100000: data |= 0x00000000; break;
+			case 0x040000: data |= 0x00000020; break;
+			case 0x020000: data |= 0x00000040; break;
+			case 0x010000: data |= 0x00000060; break;
+			default:
+				WARN_ON(1);
+				return false;
+			}
+			break;
+		default:
+			WARN_ON(1);
+			return false;
+		}
+
+		switch (nvkm_memory_target(pt->memory)) {
+		case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
+		case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
+		case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
+		default:
+			WARN_ON(1);
+			return false;
+		}
+
+		data |= pt->addr;
+	}
+	*pdata = data;
+	return true;
+}
+
+static void
+nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+	struct nvkm_vmm_join *join;
+	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
+	u64 data;
+
+	if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
+		return;
+
+	list_for_each_entry(join, &vmm->join, head) {
+		nvkm_kmap(join->inst);
+		nvkm_wo64(join->inst, pdeo, data);
+		nvkm_done(join->inst);
+	}
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgd = {
+	.pde = nv50_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_12[] = {
+	{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
+	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+	{}
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_16[] = {
+	{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
+	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+	{}
+};
+
+static void
+nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+	struct nvkm_device *device = subdev->device;
+	int i, id;
+
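+	/* Invalidate the TLBs of every engine holding references to this
+	 * VMM, translating engine indices into MMU engine IDs.
+	 */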
+	mutex_lock(&subdev->mutex);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if (!atomic_read(&vmm->engref[i]))
+			continue;
+
+		/* Unfortunate hw bug workaround: GR provides a dedicated
+		 * TLB flush method; only fall through to the generic path
+		 * when it's unsupported.
+		 */
+		if (i == NVKM_ENGINE_GR && device->gr) {
+			int ret = nvkm_gr_tlb_flush(device->gr);
+			if (ret != -ENODEV)
+				continue;
+		}
+
+		switch (i) {
+		case NVKM_ENGINE_GR    : id = 0x00; break;
+		case NVKM_ENGINE_VP    :
+		case NVKM_ENGINE_MSPDEC: id = 0x01; break;
+		case NVKM_SUBDEV_BAR   : id = 0x06; break;
+		case NVKM_ENGINE_MSPPP :
+		case NVKM_ENGINE_MPEG  : id = 0x08; break;
+		case NVKM_ENGINE_BSP   :
+		case NVKM_ENGINE_MSVLD : id = 0x09; break;
+		case NVKM_ENGINE_CIPHER:
+		case NVKM_ENGINE_SEC   : id = 0x0a; break;
+		case NVKM_ENGINE_CE0   : id = 0x0d; break;
+		default:
+			continue;
+		}
+
+		nvkm_wr32(device, 0x100c80, (id << 16) | 1);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+				break;
+		) < 0)
+			nvkm_error(subdev, "%s mmu invalidate timeout\n",
+				   nvkm_subdev_name[i]);
+	}
+	mutex_unlock(&subdev->mutex);
+}
+
+static int
+nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+	       struct nvkm_vmm_map *map)
+{
+	const struct nvkm_vmm_page *page = map->page;
+	union {
+		struct nv50_vmm_map_vn vn;
+		struct nv50_vmm_map_v0 v0;
+	} *args = argv;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	struct nvkm_ram *ram = device->fb->ram;
+	struct nvkm_memory *memory = map->memory;
+	u8  aper, kind, comp, priv, ro;
+	int kindn, ret = -ENOSYS;
+	const u8 *kindm;
+
+	map->type = map->ctag = 0;
+	map->next = 1 << page->shift;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		ro   = !!args->v0.ro;
+		priv = !!args->v0.priv;
+		kind = args->v0.kind & 0x7f;
+		comp = args->v0.comp & 0x03;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		ro   = 0;
+		priv = 0;
+		kind = 0x00;
+		comp = 0;
+	} else {
+		VMM_DEBUG(vmm, "args");
+		return ret;
+	}
+
+	switch (nvkm_memory_target(memory)) {
+	case NVKM_MEM_TARGET_VRAM:
+		if (ram->stolen) {
+			map->type |= ram->stolen;
+			aper = 3;
+		} else {
+			aper = 0;
+		}
+		break;
+	case NVKM_MEM_TARGET_HOST:
+		aper = 2;
+		break;
+	case NVKM_MEM_TARGET_NCOH:
+		aper = 3;
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+	if (kind >= kindn || kindm[kind] == 0x7f) {
+		VMM_DEBUG(vmm, "kind %02x", kind);
+		return -EINVAL;
+	}
+
+	if (map->mem && map->mem->type != kindm[kind]) {
+		VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
+			  kindm[kind], map->mem->type);
+		return -EINVAL;
+	}
+
+	if (comp) {
+		u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
+		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+			return -EINVAL;
+		}
+
+		ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+					   &map->tags);
+		if (ret) {
+			VMM_DEBUG(vmm, "comp %d", ret);
+			return ret;
+		}
+
+		if (map->tags->mn) {
+			u32 tags = map->tags->mn->offset + (map->offset >> 16);
+			map->ctag |= (u64)comp << 49;
+			map->type |= (u64)comp << 47;
+			map->type |= (u64)tags << 49;
+			map->next |= map->ctag;
+		}
+	}
+
+	map->type |= BIT(0); /* Valid. */
+	map->type |= (u64)ro << 3;
+	map->type |= (u64)aper << 4;
+	map->type |= (u64)priv << 6;
+	map->type |= (u64)kind << 40;
+	return 0;
+}
+
+static void
+nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	struct nvkm_vmm_join *join;
+
+	list_for_each_entry(join, &vmm->join, head) {
+		if (join->inst == inst) {
+			list_del(&join->head);
+			kfree(join);
+			break;
+		}
+	}
+}
+
+static int
+nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
+	struct nvkm_vmm_join *join;
+	int ret = 0;
+	u64 data;
+	u32 pdei;
+
+	if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
+		return -ENOMEM;
+	join->inst = inst;
+	list_add_tail(&join->head, &vmm->join);
+
+	nvkm_kmap(join->inst);
+	for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
+		if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
+			ret = -EINVAL;
+			break;
+		}
+		nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
+	}
+	nvkm_done(join->inst);
+	return ret;
+}
+
+static const struct nvkm_vmm_func
+nv50_vmm = {
+	.join = nv50_vmm_join,
+	.part = nv50_vmm_part,
+	.valid = nv50_vmm_valid,
+	.flush = nv50_vmm_flush,
+	.page_block = 1 << 29,
+	.page = {
+		{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
+		{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+		{}
+	}
+};
+
+int
+nv50_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	     struct lock_class_key *key, const char *name,
+	     struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&nv50_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index a4cb82495cee3..b1b1f3626b962 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -87,7 +87,7 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 	if (pci->irq >= 0) {
 		free_irq(pci->irq, pci);
 		pci->irq = -1;
-	};
+	}
 
 	if (pci->agp.bridge)
 		nvkm_agp_fini(pci);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 73ca1203281d6..5e91b3f90065f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -39,7 +39,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 {
 	struct gm200_secboot *gsb = gm200_secboot(sb);
 	struct nvkm_subdev *subdev = &gsb->base.subdev;
-	struct nvkm_vma vma;
+	struct nvkm_vma *vma = NULL;
 	u32 start_address;
 	int ret;
 
@@ -48,12 +48,16 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 		return ret;
 
 	/* Map the HS firmware so the HS bootloader can see it */
-	ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+	ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
 	if (ret) {
 		nvkm_falcon_put(falcon, subdev);
 		return ret;
 	}
 
+	ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
+	if (ret)
+		goto end;
+
 	/* Reset and set the falcon up */
 	ret = nvkm_falcon_reset(falcon);
 	if (ret)
@@ -61,7 +65,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 	nvkm_falcon_bind_context(falcon, gsb->inst);
 
 	/* Load the HS bootloader into the falcon's IMEM/DMEM */
-	ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
+	ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
 	if (ret < 0)
 		goto end;
 
@@ -91,7 +95,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 	nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
 
 	/* We don't need the ACR firmware anymore */
-	nvkm_gpuobj_unmap(&vma);
+	nvkm_vmm_put(gsb->vmm, &vma);
 	nvkm_falcon_put(falcon, subdev);
 
 	return ret;
@@ -102,37 +106,26 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
 {
 	struct gm200_secboot *gsb = gm200_secboot(sb);
 	struct nvkm_device *device = sb->subdev.device;
-	struct nvkm_vm *vm;
-	const u64 vm_area_len = 600 * 1024;
 	int ret;
 
 	/* Allocate instance block and VM */
-	ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+			      &gsb->inst);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+	ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
+			   &gsb->vmm);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_new(device, 0, vm_area_len, 0, NULL, &vm);
-	if (ret)
-		return ret;
-
-	atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
+	atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
+	gsb->vmm->debug = gsb->base.subdev.debug;
 
-	ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
-	nvkm_vm_ref(NULL, &vm, NULL);
+	ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
 	if (ret)
 		return ret;
 
-	nvkm_kmap(gsb->inst);
-	nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
-	nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
-	nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
-	nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
-	nvkm_done(gsb->inst);
-
 	if (sb->acr->func->oneinit) {
 		ret = sb->acr->func->oneinit(sb->acr, sb);
 		if (ret)
@@ -160,9 +153,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
 
 	sb->acr->func->dtor(sb->acr);
 
-	nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
-	nvkm_gpuobj_del(&gsb->pgd);
-	nvkm_gpuobj_del(&gsb->inst);
+	nvkm_vmm_part(gsb->vmm, gsb->inst);
+	nvkm_vmm_unref(&gsb->vmm);
+	nvkm_memory_unref(&gsb->inst);
 
 	return gsb;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index c8ab3d76bdef7..62c5e162099aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -29,9 +29,8 @@ struct gm200_secboot {
 	struct nvkm_secboot base;
 
 	/* Instance block & address space used for HS FW execution */
-	struct nvkm_gpuobj *inst;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
+	struct nvkm_memory *inst;
+	struct nvkm_vmm *vmm;
 };
 #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index ee989210725e9..6f10b098676ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -183,7 +183,7 @@ acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
 			  break;
 	);
 	if (reg & BIT(4)) {
-		nvkm_debug(subdev, "applying workaround for start bug...");
+		nvkm_debug(subdev, "applying workaround for start bug...\n");
 		nvkm_falcon_start(sb->boot_falcon);
 		nvkm_msec(subdev->device, 1,
 			if ((reg = nvkm_rd32(subdev->device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index 885e919a87205..d9091f029506c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -25,6 +25,7 @@
 
 #include <subdev/secboot.h>
 #include <subdev/mmu.h>
+struct nvkm_gpuobj;
 
 struct nvkm_secboot_func {
 	int (*oneinit)(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 2bafcc1d18188..7ba56b12badd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/therm/gt215.o
 nvkm-y += nvkm/subdev/therm/gf119.o
 nvkm-y += nvkm/subdev/therm/gm107.o
 nvkm-y += nvkm/subdev/therm/gm200.o
+nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 952a7cb0a59a0..f27fc6d0d4c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -341,7 +341,8 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_therm *therm = nvkm_therm(subdev);
 
-	therm->func->init(therm);
+	if (therm->func->init)
+		therm->func->init(therm);
 
 	if (therm->suspend >= 0) {
 		/* restore the pwm value only when on manual or auto mode */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
new file mode 100644
index 0000000000000..9f0dea3f61dc4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Rhys Kidd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rhys Kidd
+ */
+#include "priv.h"
+
+static int
+gp100_temp_get(struct nvkm_therm *therm)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	u32 tsensor = nvkm_rd32(device, 0x020460);
+	u32 inttemp = (tsensor & 0x0001fff8);
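+	/* Bits 16:3 appear to hold the temperature as a fixed-point value;
+	 * the fractional low bits are dropped below to return whole
+	 * degrees.
+	 */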
+
+	/* device SHADOWed */
+	if (tsensor & 0x40000000)
+		nvkm_trace(subdev, "reading temperature from SHADOWed sensor\n");
+
+	/* device valid */
+	if (tsensor & 0x20000000)
+		return (inttemp >> 8);
+	else
+		return -ENODEV;
+}
+
+static const struct nvkm_therm_func
+gp100_therm = {
+	.temp_get = gp100_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gp100_therm_new(struct nvkm_device *device, int index,
+		struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+}