drm/vmwgfx: switch the TTM backends to self alloc
Similar to the TTM range manager.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-9-christian.koenig@amd.com
Christian König committed Jun 4, 2021
1 parent beb4c86 commit d3bcb4b
Showing 2 changed files with 31 additions and 24 deletions.
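Both files follow the same pattern: the resource manager backend now allocates the node behind mem->mm_node itself, initializes it with ttm_resource_init(), and frees it unconditionally in its put_node callback. Below is a minimal sketch of that pattern, modelled on the gmrid hunks that follow; the example_man_* names are made up for illustration, while the callback signatures and helper calls are the ones visible in the diff.

```c
/*
 * Minimal sketch of the "self alloc" pattern, modelled on the
 * vmw_gmrid_man_* hunks below.  example_man_get_node/put_node are
 * illustrative names, not functions from this patch.
 */
static int example_man_get_node(struct ttm_resource_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource *mem)
{
	/* The backend allocates its own node... */
	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem->mm_node)
		return -ENOMEM;

	/* ...and initializes the common resource state itself. */
	ttm_resource_init(bo, place, mem->mm_node);

	/* Backend-specific setup (id allocation, range insertion, ...). */
	return 0;
}

static void example_man_put_node(struct ttm_resource_manager *man,
				 struct ttm_resource *mem)
{
	/* Backend-specific teardown, then free the node it allocated. */
	kfree(mem->mm_node);
}
```

Because the backend owns the allocation, the old if (mem->mm_node) guard in the put_node paths becomes unnecessary.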
18 changes: 11 additions & 7 deletions drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,6 +57,12 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem->mm_node)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, mem->mm_node);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ -87,13 +93,11 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, mem->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= mem->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
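For reference, vmw_gmrid_man_put_node() as it reads with the hunk above applied (reconstructed from the diff; the second parameter line sits outside the hunk context and is assumed to match the get_node counterpart):

```c
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *mem)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	/* mm_node is always backend-allocated now, so no NULL check. */
	ida_free(&gman->gmr_ida, mem->start);
	spin_lock(&gman->lock);
	gman->used_gmr_pages -= mem->num_pages;
	spin_unlock(&gman->lock);
	kfree(mem->mm_node);
}
```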
37 changes: 20 additions & 17 deletions drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -54,16 +55,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
@@ -76,32 +79,34 @@
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place, mem,
+						     lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, mem, lpfn,
+					     mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}
 
 	return ret;
@@ -113,15 +118,13 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
 			     struct ttm_resource *mem)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
+	struct ttm_range_mgr_node * node = mem->mm_node;
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
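The THP manager now embeds its drm_mm_node in a ttm_range_mgr_node sized with struct_size(). The sketch below spells out the layout and allocation arithmetic the hunks above rely on; the struct definition shown here is the one implied by node->base and node->mm_nodes[0], with the authoritative version living in <drm/ttm/ttm_range_manager.h>.

```c
/*
 * Layout implied by the node->base and node->mm_nodes[0] accesses
 * above: a common ttm_resource followed by a flexible array of
 * drm_mm_node entries.
 */
struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[];
};

/*
 * struct_size(node, mm_nodes, 1) evaluates to
 * sizeof(*node) + 1 * sizeof(node->mm_nodes[0]), with overflow
 * checking, so kzalloc() reserves the resource plus exactly one
 * embedded drm_mm_node, echoing the diff above:
 */
node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
	return -ENOMEM;

/* Common resource state first, then drm_mm works on the embedded node. */
ttm_resource_init(bo, place, &node->base);
```

The flexible array leaves room for managers that track more than one range per resource; this backend only ever uses mm_nodes[0].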
