Skip to content

Commit

Permalink
Merge tag 'topic/i915-ttm-2021-06-11' of git://anongit.freedesktop.org/drm/drm-misc into drm-intel-gt-next
Browse files Browse the repository at this point in the history

drm-misc and drm-intel pull request for topic/i915-ttm:
- Convert i915 lmem handling to ttm.
- Add a patch to temporarily add a driver_private member to vma_node.
- Use this to allow mixed object mmap handling for i915.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/eb71ee2d-3413-6ca8-0b7c-a58695f00b77@linux.intel.com
  • Loading branch information
Joonas Lahtinen committed Jun 14, 2021
2 parents c649432 + cf3e3e8 commit 0e9d217
Show file tree
Hide file tree
Showing 24 changed files with 1,039 additions and 250 deletions.
9 changes: 0 additions & 9 deletions drivers/gpu/drm/drm_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -1148,15 +1148,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}

if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
drm_gem_object_put(obj);
return -EINVAL;
}

vma->vm_flags &= ~VM_MAYWRITE;
}

ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);

Expand Down
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,7 @@ gem-y += \
gem/i915_gem_stolen.o \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/display/intel_display.c
Original file line number Diff line number Diff line change
Expand Up @@ -11771,7 +11771,7 @@ intel_user_framebuffer_create(struct drm_device *dev,

/* object is backed with LMEM for discrete */
i915 = to_i915(obj->base.dev);
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
return ERR_PTR(-EREMOTE);
Expand Down
9 changes: 3 additions & 6 deletions drivers/gpu/drm/i915/gem/i915_gem_create.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
return -E2BIG;

/*
* For now resort to CPU based clearing for device local-memory, in the
* near future this will use the blitter engine for accelerated, GPU
* based clearing.
* I915_BO_ALLOC_USER will make sure the object is cleared before
* any user access.
*/
flags = 0;
if (mr->type == INTEL_MEMORY_LOCAL)
flags = I915_BO_ALLOC_CPU_CLEAR;
flags = I915_BO_ALLOC_USER;

ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
Expand Down
126 changes: 41 additions & 85 deletions drivers/gpu/drm/i915/gem/i915_gem_lmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,74 +4,10 @@
*/

#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

/*
 * Release the backing store of an lmem object: hand the region
 * allocation back to the TTM-based region allocator, then tear down the
 * sg_table that described it. Installed as the .put_pages hook of
 * i915_gem_lmem_obj_ops.
 */
static void lmem_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	/* Return the backing allocation to the memory region. */
	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
	/* Backing store is gone, so there is nothing left to write back. */
	obj->mm.dirty = false;
	/* Free the sg entries first, then the kmalloc'ed table itself. */
	sg_free_table(pages);
	kfree(pages);
}

static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;

flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;

obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);

/* Range manager is always contigous */
if (obj->mm.region->is_range_manager)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}

__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
void __iomem *vaddr =
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);

if (!vaddr) {
struct sg_table *pages =
__i915_gem_object_unset_pages(obj);

if (!IS_ERR_OR_NULL(pages))
lmem_put_pages(obj, pages);
}

memset_io(vaddr, 0, obj->base.size);
io_mapping_unmap(vaddr);
}

return 0;
}

/*
 * GEM object ops for buffers backed by device local memory (lmem).
 * I915_GEM_OBJECT_HAS_IOMEM: the backing store is accessed through an
 * io mapping (see the CPU-clear path in lmem_get_pages), not struct pages.
 */
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.name = "i915_gem_object_lmem",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = lmem_get_pages,
	.put_pages = lmem_put_pages,
	.release = i915_gem_object_release_memory_region,
};

void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
Expand All @@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

/**
* i915_gem_object_validates_to_lmem - Whether the object is resident in
* lmem when pages are present.
* @obj: The object to check.
*
* Migratable objects residency may change from under us if the object is
* not pinned or locked. This function is intended to be used to check whether
* the object can only reside in lmem when pages are present.
*
* Return: Whether the object is always resident in lmem when pages are
* present.
*/
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

return !i915_gem_object_migratable(obj) &&
mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

/**
* i915_gem_object_is_lmem - Whether the object is resident in
* lmem
* @obj: The object to check.
*
* Even if an object is allowed to migrate and change memory region,
* this function checks whether it will always be present in lmem when
* valid *or* if that's not the case, whether it's currently resident in lmem.
* For migratable and evictable objects, the latter only makes sense when
* the object is locked.
*
* Return: Whether the object is migratable but currently resident in lmem,
* or is not migratable and will be present in lmem when valid.
*/
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = obj->mm.region;
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
if (i915_gem_object_migratable(obj) &&
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
Expand All @@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}

/*
 * Common initialization for an lmem-backed GEM object: set up the base
 * GEM object, install the lmem ops, and tie the object to its memory
 * region. Returns 0 (no failure paths at present).
 */
int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
				struct drm_i915_gem_object *obj,
				resource_size_t size,
				unsigned int flags)
{
	/* One shared lock class so lockdep groups all lmem objects together. */
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;

	/* Driver-private GEM object: no shmem backing store is attached. */
	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);

	/* WC/GTT read domains, matching the I915_CACHE_NONE coherency below. */
	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}
5 changes: 0 additions & 5 deletions drivers/gpu/drm/i915/gem/i915_gem_lmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);

int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);

#endif /* !__I915_GEM_LMEM_H */
83 changes: 57 additions & 26 deletions drivers/gpu/drm/i915/gem/i915_gem_mman.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
Expand Down Expand Up @@ -623,6 +624,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;

GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

mmo = lookup_mmo(obj, mmap_type);
if (mmo)
goto out;
Expand Down Expand Up @@ -665,40 +668,47 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
}

static int
__assign_mmap_offset(struct drm_file *file,
u32 handle,
__assign_mmap_offset(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
u64 *offset)
u64 *offset, struct drm_file *file)
{
struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
int err;

obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
if (i915_gem_object_never_mmap(obj))
return -ENODEV;

if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
goto out;
if (obj->ops->mmap_offset) {
*offset = obj->ops->mmap_offset(obj);
return 0;
}

if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return -ENODEV;

mmo = mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo)) {
err = PTR_ERR(mmo);
goto out;
}
if (IS_ERR(mmo))
return PTR_ERR(mmo);

*offset = drm_vma_node_offset_addr(&mmo->vma_node);
err = 0;
out:
return 0;
}

/*
 * Resolve a userspace object handle and assign its mmap offset.
 * Thin wrapper around __assign_mmap_offset() that performs the
 * handle lookup and drops the lookup reference afterwards.
 */
static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = __assign_mmap_offset(obj, mmap_type, offset, file);

	i915_gem_object_put(obj);

	return ret;
}
Expand All @@ -718,7 +728,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
else
mmap_type = I915_MMAP_TYPE_GTT;

return __assign_mmap_offset(file, handle, mmap_type, offset);
return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
Expand Down Expand Up @@ -786,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}

return __assign_mmap_offset(file, args->handle, type, &args->offset);
return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
Expand Down Expand Up @@ -890,8 +900,18 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* destroyed and will be invalid when the vma manager lock
* is released.
*/
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
if (!node->driver_private) {
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);

GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
base.vma_node));

GEM_BUG_ON(obj && !obj->ops->mmap_ops);
}
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
Expand All @@ -913,7 +933,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}

vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = mmo;

if (i915_gem_object_has_iomem(obj))
vma->vm_flags |= VM_IO;

/*
* We keep the ref on mmo->obj, not vm_file, but we require
Expand All @@ -927,6 +949,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
/* Drop the initial creation reference, the vma is now holding one. */
fput(anon);

if (obj->ops->mmap_ops) {
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
vma->vm_private_data = node->driver_private;
return 0;
}

vma->vm_private_data = mmo;

switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
vma->vm_page_prot =
Expand Down
Loading

0 comments on commit 0e9d217

Please sign in to comment.