Skip to content

Commit

Permalink
drm/ttm: Add a macro to perform LRU iteration
Browse files Browse the repository at this point in the history
Following the design direction communicated here:

https://lore.kernel.org/linux-mm/b7491378-defd-4f1c-31e2-29e4c77e2d67@amd.com/T/#ma918844aa8a6efe8768fdcda0c6590d5c93850c9

Export a LRU walker for driver shrinker use. The walker
initially supports only trylocking, since that's the
method used by shrinkers. The walker makes use of
scoped_guard() to allow exiting from the LRU walk loop
without performing any explicit unlocking or
cleanup.

v8:
- Split out from another patch.
- Use a struct for bool arguments to increase readability (Matt Brost).
- Unmap user-space cpu-mappings before shrinking pages.
- Explain non-fatal error codes (Matt Brost)

v10:
- Instead of using the existing helper, wrap the interface inside out and
  provide a loop to de-midlayer the LRU iteration (Christian König).
- Removing the R-B by Matt Brost since the patch was significantly changed.

v11:
- Split the patch up to include just the LRU walk helper.

v12:
- Indent after scoped_guard() (Matt Brost)

v15:
- Adapt to new definition of scoped_guard()

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/intel-xe/20250305092220.123405-5-thomas.hellstrom@linux.intel.com
  • Loading branch information
Thomas Hellström committed Mar 5, 2025
1 parent 8ae875f commit f3bcfd0
Show file tree
Hide file tree
Showing 2 changed files with 208 additions and 4 deletions.
140 changes: 136 additions & 4 deletions drivers/gpu/drm/ttm/ttm_bo_util.c
Original file line number Diff line number Diff line change
Expand Up @@ -769,12 +769,10 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
return ret;
}

static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
struct ttm_operation_ctx *ctx = walk->ctx;

*needs_unlock = false;

if (dma_resv_trylock(bo->base.resv)) {
Expand Down Expand Up @@ -877,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
* since if we do it the other way around, and the trylock fails,
* we need to drop the lru lock to put the bo.
*/
if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
bo_locked = true;
else if (!walk->ticket || walk->ctx->no_wait_gpu ||
walk->trylock_only)
Expand Down Expand Up @@ -920,3 +918,137 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,

return progress;
}
EXPORT_SYMBOL(ttm_lru_walk_for_evict);

/*
 * Release the resv lock (if the cursor took it) and drop the refcount
 * held on the cursor's current buffer object, if any.
 */
static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = curs->bo;

	if (!bo)
		return;

	if (curs->needs_unlock)
		dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);
	curs->bo = NULL;
}

/**
* ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
* and clean up any iteration it was used for.
* @curs: The cursor.
*/
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
{
spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;

ttm_bo_lru_cursor_cleanup_bo(curs);
spin_lock(lru_lock);
ttm_resource_cursor_fini(&curs->res_curs);
spin_unlock(lru_lock);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);

/**
* ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
* @curs: The ttm_bo_lru_cursor to initialize.
* @man: The ttm resource_manager whose LRU lists to iterate over.
* @ctx: The ttm_operation_ctx to govern the locking.
*
* Initialize a struct ttm_bo_lru_cursor. Currently only trylocking
* or prelocked buffer objects are available as detailed by
* @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
* supported.
*
* Return: Pointer to @curs. The function does not fail.
*/
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
struct ttm_operation_ctx *ctx)
{
memset(curs, 0, sizeof(*curs));
ttm_resource_cursor_init(&curs->res_curs, man);
curs->ctx = ctx;

return curs;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_init);

/*
 * Attempt to trylock and refcount the bo backing @res on behalf of @curs.
 * Returns the bo on success, NULL if either the trylock or the refcount
 * attempt failed.
 */
static struct ttm_buffer_object *
ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
{
	struct ttm_buffer_object *bo = res->bo;

	if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
		return NULL;

	if (ttm_bo_get_unless_zero(bo)) {
		curs->bo = bo;
		return bo;
	}

	/* The bo is on its way out; undo the trylock and skip it. */
	if (curs->needs_unlock)
		dma_resv_unlock(bo->base.resv);

	return NULL;
}

/**
 * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
 * to find and lock buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
 * ttm_bo_lru_cursor_first().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	/*
	 * Initialize to NULL so the value is always determinate at the
	 * return statement; the previous res ? bo : NULL form left @bo
	 * conditionally unassigned.
	 */
	struct ttm_buffer_object *bo = NULL;
	struct ttm_resource *res;

	/* Drop the lock and reference held from the previous iteration. */
	ttm_bo_lru_cursor_cleanup_bo(curs);

	/*
	 * Walk resources under the LRU lock until one of them yields a
	 * successfully trylocked and refcounted bo, or the list ends.
	 */
	spin_lock(lru_lock);
	while ((res = ttm_resource_manager_next(&curs->res_curs))) {
		bo = ttm_bo_from_res_reserved(res, curs);
		if (bo)
			break;
	}
	spin_unlock(lru_lock);

	return bo;
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_next);

/**
 * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
 * to find and lock buffer object.
 * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
 *
 * Return: A pointer to a locked and reference-counted buffer object,
 * or NULL if none could be found and looping should be terminated.
 */
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
{
	spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
	struct ttm_resource *first;
	struct ttm_buffer_object *bo;

	spin_lock(lru_lock);
	first = ttm_resource_manager_first(&curs->res_curs);
	if (!first) {
		/* Empty LRU lists: terminate the iteration right away. */
		spin_unlock(lru_lock);
		return NULL;
	}

	bo = ttm_bo_from_res_reserved(first, curs);
	spin_unlock(lru_lock);

	/* First bo couldn't be locked? Keep scanning the remaining LRU. */
	return bo ? bo : ttm_bo_lru_cursor_next(curs);
}
EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
72 changes: 72 additions & 0 deletions include/drm/ttm/ttm_bo.h
Original file line number Diff line number Diff line change
Expand Up @@ -467,4 +467,76 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);

/* Driver LRU walk helpers initially targeted for shrinking. */

/**
 * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
 *
 * Tracks the state of one walk over a resource manager's LRU lists,
 * including the currently held (locked and refcounted) buffer object.
 */
struct ttm_bo_lru_cursor {
	/** @res_curs: Embedded struct ttm_resource_cursor. */
	struct ttm_resource_cursor res_curs;
	/**
	 * @ctx: The struct ttm_operation_ctx used while looping.
	 * governs the locking mode.
	 */
	struct ttm_operation_ctx *ctx;
	/**
	 * @bo: Buffer object pointer if a buffer object is refcounted,
	 * NULL otherwise.
	 */
	struct ttm_buffer_object *bo;
	/**
	 * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
	 * unlock before the next iteration or after loop exit.
	 */
	bool needs_unlock;
};

void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);

struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
struct ttm_operation_ctx *ctx);

struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);

struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);

/*
 * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
 */
/*
 * Constructor maps to ttm_bo_lru_cursor_init(); the destructor runs
 * ttm_bo_lru_cursor_fini() on scope exit (skipped for a NULL cursor).
 */
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
	     if (_T) {ttm_bo_lru_cursor_fini(_T); },
	     ttm_bo_lru_cursor_init(curs, man, ctx),
	     struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
	     struct ttm_operation_ctx *ctx);
/* Required by scoped_guard(): exposes the guarded object to the scope body. */
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
/* Cursor init cannot fail, so the guard is unconditional. */
#define class_ttm_bo_lru_cursor_is_conditional false

/**
 * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
 * resources on LRU lists.
 * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
 * @_man: The resource manager whose LRU lists to iterate over.
 * @_ctx: The struct ttm_operation_ctx to govern the @_bo locking.
 * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
 * for the current iteration.
 *
 * Iterate over all resources of @_man and for each resource, attempt to
 * reference and lock (using the locking mode detailed in @_ctx) the buffer
 * object it points to. If successful, assign @_bo to the address of the
 * buffer object and update @_cursor. The iteration is guarded in the
 * sense that @_cursor will be initialized before looping start and cleaned
 * up at looping termination, even if terminated prematurely by, for
 * example a return or break statement. Exiting the loop will also unlock
 * (if needed) and unreference @_bo.
 */
#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo)	\
	scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx)		\
		for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo);	\
		     (_bo) = ttm_bo_lru_cursor_next(_cursor))

#endif

0 comments on commit f3bcfd0

Please sign in to comment.