drm/i915/ttm: switch over to ttm_buddy_man
Move back to the buddy allocator for managing device local memory, and
restore the lost mock selftests. Keep the range-manager-related bits
around, since we will likely need them for managing stolen memory at
some point. For stolen memory we also don't need to reserve anything,
so there is no need to support a generic reserve interface.
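
As a rough sketch of the new reservation path (a hypothetical caller,
not part of this patch; the function name and the offset/size values
are made up for illustration), reserving a range in a region now goes
straight through the buddy manager rather than a per-region priv_ops
hook:

    /* Carve a hole out of device local memory at region setup time. */
    static int example_reserve_range(struct intel_memory_region *mem)
    {
            resource_size_t offset = SZ_1M;  /* illustrative only */
            resource_size_t size = SZ_64K;   /* illustrative only */

            /*
             * intel_memory_region_reserve() now simply forwards to
             * i915_ttm_buddy_man_reserve(mem->region_private, offset, size).
             */
            return intel_memory_region_reserve(mem, offset, size);
    }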

v2(Thomas):
    - bo->page_alignment is in page units, not bytes
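
A minimal sketch of what that unit change means in practice (assuming
4K pages, i.e. PAGE_SHIFT == 12; the standalone assignment below is
purely illustrative):

    /* page_alignment is expressed in pages, not bytes ... */
    bo.page_alignment = SZ_64K >> PAGE_SHIFT; /* 16 pages, not 65536 */

    /* ... so this patch passes an alignment of 1 page to ttm_bo_init(): */
    ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
                      bo_type, &i915_sys_placement, 1,
                      true, NULL, NULL, i915_ttm_bo_destroy);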

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210616152501.394518-6-matthew.auld@intel.com
Matthew Auld committed Jun 16, 2021
1 parent 687c7d0 commit d53ec32
Showing 6 changed files with 180 additions and 216 deletions.
20 changes: 2 additions & 18 deletions drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -175,11 +175,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

/* Will do for now. Our pinned objects are still on TTM's LRU lists */
if (!i915_gem_object_evictable(obj))
return false;

/* This isn't valid with a buddy allocator */
return ttm_bo_eviction_valuable(bo, place);
return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -654,20 +650,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
enum ttm_bo_type bo_type;
size_t alignment = 0;
int ret;

/* Adjust alignment to GPU- and CPU huge page sizes. */

if (mem->is_range_manager) {
if (size >= SZ_1G)
alignment = SZ_1G >> PAGE_SHIFT;
else if (size >= SZ_2M)
alignment = SZ_2M >> PAGE_SHIFT;
else if (size >= SZ_64K)
alignment = SZ_64K >> PAGE_SHIFT;
}

drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
i915_gem_object_init_memory_region(obj, mem);
@@ -688,7 +672,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
*/
obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
bo_type, &i915_sys_placement, alignment,
bo_type, &i915_sys_placement, 1,
true, NULL, NULL, i915_ttm_bo_destroy);

if (!ret)
55 changes: 4 additions & 51 deletions drivers/gpu/drm/i915/intel_memory_region.c
@@ -5,6 +5,7 @@

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
u16 class;
@@ -28,11 +29,6 @@ static const struct {
},
};

struct intel_region_reserve {
struct list_head link;
struct ttm_resource *res;
};

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
@@ -63,27 +59,6 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}

/**
* intel_memory_region_unreserve - Unreserve all previously reserved
* ranges
* @mem: The region containing the reserved ranges.
*/
void intel_memory_region_unreserve(struct intel_memory_region *mem)
{
struct intel_region_reserve *reserve, *next;

if (!mem->priv_ops || !mem->priv_ops->free)
return;

mutex_lock(&mem->mm_lock);
list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
list_del(&reserve->link);
mem->priv_ops->free(mem, reserve->res);
kfree(reserve);
}
mutex_unlock(&mem->mm_lock);
}

/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
@@ -96,28 +71,11 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{
int ret;
struct intel_region_reserve *reserve;

if (!mem->priv_ops || !mem->priv_ops->reserve)
return -EINVAL;

reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
if (!reserve)
return -ENOMEM;
struct ttm_resource_manager *man = mem->region_private;

reserve->res = mem->priv_ops->reserve(mem, offset, size);
if (IS_ERR(reserve->res)) {
ret = PTR_ERR(reserve->res);
kfree(reserve);
return ret;
}

mutex_lock(&mem->mm_lock);
list_add_tail(&reserve->link, &mem->reserved);
mutex_unlock(&mem->mm_lock);
GEM_BUG_ON(mem->is_range_manager);

return 0;
return i915_ttm_buddy_man_reserve(man, offset, size);
}

struct intel_memory_region *
@@ -149,9 +107,6 @@ intel_memory_region_create(struct drm_i915_private *i915,

mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
INIT_LIST_HEAD(&mem->reserved);

mutex_init(&mem->mm_lock);

if (ops->init) {
err = ops->init(mem);
@@ -182,11 +137,9 @@ static void __intel_memory_region_destroy(struct kref *kref)
struct intel_memory_region *mem =
container_of(kref, typeof(*mem), kref);

intel_memory_region_unreserve(mem);
if (mem->ops->release)
mem->ops->release(mem);

mutex_destroy(&mem->mm_lock);
mutex_destroy(&mem->objects.lock);
kfree(mem);
}
17 changes: 0 additions & 17 deletions drivers/gpu/drm/i915/intel_memory_region.h
@@ -59,28 +59,17 @@ struct intel_memory_region_ops {
unsigned int flags);
};

struct intel_memory_region_private_ops {
struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
void (*free)(struct intel_memory_region *mem,
struct ttm_resource *res);
};

struct intel_memory_region {
struct drm_i915_private *i915;

const struct intel_memory_region_ops *ops;
const struct intel_memory_region_private_ops *priv_ops;

struct io_mapping iomap;
struct resource region;

/* For fake LMEM */
struct drm_mm_node fake_mappable;

struct mutex mm_lock;

struct kref kref;

resource_size_t io_start;
@@ -94,17 +83,13 @@ struct intel_memory_region {
char name[16];
bool private; /* not for userspace */

struct list_head reserved;

dma_addr_t remap_addr;

struct {
struct mutex lock; /* Protects access to objects */
struct list_head list;
} objects;

size_t chunk_size;
unsigned int max_order;
bool is_range_manager;

void *region_private;
@@ -138,8 +123,6 @@ __printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
const char *fmt, ...);

void intel_memory_region_unreserve(struct intel_memory_region *mem);

int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
122 changes: 46 additions & 76 deletions drivers/gpu/drm/i915/intel_region_ttm.c
@@ -8,6 +8,7 @@

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

@@ -67,72 +68,28 @@ int intel_region_to_ttm_type(const struct intel_memory_region *mem)
return type;
}

static struct ttm_resource *
intel_region_ttm_resource_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{
struct ttm_resource_manager *man = mem->region_private;
struct ttm_place place = {};
struct ttm_buffer_object mock_bo = {};
struct ttm_resource *res;
int ret;

/*
* Having to use a mock_bo is unfortunate but stems from some
* drivers having private managers that insist to know what the
* allocate memory is intended for, using it to send private
* data to the manager. Also recently the bo has been used to send
* alignment info to the manager. Assume that apart from the latter,
* none of the managers we use will ever access the buffer object
* members, hoping we can pass the alignment info in the
* struct ttm_place in the future.
*/

place.fpfn = offset >> PAGE_SHIFT;
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
mock_bo.base.size = size;
ret = man->func->alloc(man, &mock_bo, &place, &res);
if (ret == -ENOSPC)
ret = -ENXIO;

return ret ? ERR_PTR(ret) : res;
}

/**
* intel_region_ttm_resource_free - Free a resource allocated from a resource manager
* @mem: The region the resource was allocated from.
* @res: The opaque resource representing an allocation.
* intel_region_ttm_init - Initialize a memory region for TTM.
* @mem: The region to initialize.
*
* This function initializes a suitable TTM resource manager for the
* region, and if it's a LMEM region type, attaches it to the TTM
* device. MOCK regions are NOT attached to the TTM device, since we don't
* have one for the mock selftests.
*
* Return: 0 on success, negative error code on failure.
*/
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res)
{
struct ttm_resource_manager *man = mem->region_private;

man->func->free(man, res);
}

static const struct intel_memory_region_private_ops priv_ops = {
.reserve = intel_region_ttm_resource_reserve,
.free = intel_region_ttm_resource_free,
};

int intel_region_ttm_init(struct intel_memory_region *mem)
{
struct ttm_device *bdev = &mem->i915->bdev;
int mem_type = intel_region_to_ttm_type(mem);
int ret;

ret = ttm_range_man_init(bdev, mem_type, false,
resource_size(&mem->region) >> PAGE_SHIFT);
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region), PAGE_SIZE);
if (ret)
return ret;

mem->chunk_size = PAGE_SIZE;
mem->max_order =
get_order(rounddown_pow_of_two(resource_size(&mem->region)));
mem->is_range_manager = true;
mem->priv_ops = &priv_ops;
mem->region_private = ttm_manager_type(bdev, mem_type);

return 0;
@@ -150,8 +107,8 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
{
int ret;

ret = ttm_range_man_fini(&mem->i915->bdev,
intel_region_to_ttm_type(mem));
ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
intel_region_to_ttm_type(mem));
GEM_WARN_ON(ret);
mem->region_private = NULL;
}
Expand All @@ -171,12 +128,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
struct ttm_resource *res)
{
struct ttm_range_mgr_node *range_node =
container_of(res, typeof(*range_node), base);
if (mem->is_range_manager) {
struct ttm_range_mgr_node *range_node =
to_ttm_range_mgr_node(res);

GEM_WARN_ON(!mem->is_range_manager);
return i915_sg_from_mm_node(&range_node->mm_nodes[0],
mem->region.start);
return i915_sg_from_mm_node(&range_node->mm_nodes[0],
mem->region.start);
} else {
return i915_sg_from_buddy_resource(res, mem->region.start);
}
}

#ifdef CONFIG_DRM_I915_SELFTEST
@@ -206,25 +166,35 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
struct ttm_resource *res;
int ret;

/*
* We ignore the flags for now since we're using the range
* manager and contigous and min page size would be fulfilled
* by default if size is min page size aligned.
*/
mock_bo.base.size = size;

if (mem->is_range_manager) {
if (size >= SZ_1G)
mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
else if (size >= SZ_2M)
mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
else if (size >= SZ_64K)
mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
}
mock_bo.page_alignment = 1;
place.flags = flags;

ret = man->func->alloc(man, &mock_bo, &place, &res);
if (ret == -ENOSPC)
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}

#endif

void intel_region_ttm_node_free(struct intel_memory_region *mem,
struct ttm_resource *res)
{
struct ttm_resource_manager *man = mem->region_private;

man->func->free(man, res);
}

/**
* intel_region_ttm_resource_free - Free a resource allocated from a resource manager
* @mem: The region the resource was allocated from.
* @res: The opaque resource representing an allocation.
*/
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res)
{
struct ttm_resource_manager *man = mem->region_private;

man->func->free(man, res);
}