drm/radeon: let sa manager block for fences to wait for v2
Otherwise we can encounter out of memory situations under extreme load.

v2: add documentation for the new function

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Christian König committed Jul 18, 2012
1 parent 246fa34 commit bfb38d3
Showing 2 changed files with 61 additions and 23 deletions.
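
Summary of the change: the sa_manager's private spinlock is replaced by a wait_queue_head_t whose embedded wq.lock takes over the locking, so an allocation that finds neither free space nor a fence to wait on no longer has to fail with -ENOMEM; it sleeps on the queue (via wait_event_interruptible_locked()) until radeon_sa_bo_free() wakes it with wake_up_all_locked(). For readers unfamiliar with these locked-waitqueue helpers, here is a minimal, self-contained sketch of the same pattern using a toy pool with a plain free-space counter; the names toy_pool, toy_pool_alloc and toy_pool_free are illustrative only and are not part of the radeon code.

/*
 * Minimal sketch of the locked-waitqueue pattern this commit adopts.
 * Assumption: a toy allocator with a simple free-space counter.
 * The wait queue's embedded spinlock (wq.lock) protects the counter,
 * the sleep and the wake-up, so no separate lock is needed.
 */
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/errno.h>

struct toy_pool {
        wait_queue_head_t wq;   /* wq.lock also protects 'free' */
        unsigned free;
};

static void toy_pool_init(struct toy_pool *pool, unsigned size)
{
        init_waitqueue_head(&pool->wq);
        pool->free = size;
}

/* Allocate 'size' units; if 'block' is true, sleep until space shows up. */
static int toy_pool_alloc(struct toy_pool *pool, unsigned size, bool block)
{
        int r = 0;

        spin_lock(&pool->wq.lock);
        while (pool->free < size) {
                if (!block) {
                        r = -ENOMEM;
                        goto out;
                }
                /* drops wq.lock while sleeping, re-takes it before returning */
                r = wait_event_interruptible_locked(pool->wq,
                                                    pool->free >= size);
                if (r)
                        goto out;       /* -ERESTARTSYS on signal */
        }
        pool->free -= size;
out:
        spin_unlock(&pool->wq.lock);
        return r;
}

/* Give space back and wake everyone blocked in toy_pool_alloc(). */
static void toy_pool_free(struct toy_pool *pool, unsigned size)
{
        spin_lock(&pool->wq.lock);
        pool->free += size;
        wake_up_all_locked(&pool->wq);
        spin_unlock(&pool->wq.lock);
}

The key property is that wait_event_interruptible_locked() releases wq.lock while the task sleeps and re-acquires it before re-checking the condition, so a single lock can serialize both the allocator state and the wait queue. That is exactly the role sa_manager->wq.lock plays in the diff below.
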
2 changes: 1 addition & 1 deletion drivers/gpu/drm/radeon/radeon.h
@@ -362,7 +362,7 @@ struct radeon_bo_list {
  * alignment).
  */
 struct radeon_sa_manager {
-        spinlock_t lock;
+        wait_queue_head_t wq;
         struct radeon_bo *bo;
         struct list_head *hole;
         struct list_head flist[RADEON_NUM_RINGS];
82 changes: 60 additions & 22 deletions drivers/gpu/drm/radeon/radeon_sa.c
@@ -54,7 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 {
         int i, r;
 
-        spin_lock_init(&sa_manager->lock);
+        init_waitqueue_head(&sa_manager->wq);
         sa_manager->bo = NULL;
         sa_manager->size = size;
         sa_manager->domain = domain;
@@ -211,6 +211,39 @@ static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
         return false;
 }
 
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+                            unsigned size, unsigned align)
+{
+        unsigned soffset, eoffset, wasted;
+        int i;
+
+        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                if (!list_empty(&sa_manager->flist[i])) {
+                        return true;
+                }
+        }
+
+        soffset = radeon_sa_bo_hole_soffset(sa_manager);
+        eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+        wasted = (align - (soffset % align)) % align;
+
+        if ((eoffset - soffset) >= (size + wasted)) {
+                return true;
+        }
+
+        return false;
+}
+
 static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
                                    struct radeon_fence **fences,
                                    unsigned *tries)
@@ -297,8 +330,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
         INIT_LIST_HEAD(&(*sa_bo)->olist);
         INIT_LIST_HEAD(&(*sa_bo)->flist);
 
-        spin_lock(&sa_manager->lock);
-        do {
+        spin_lock(&sa_manager->wq.lock);
+        while(1) {
                 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                         fences[i] = NULL;
                         tries[i] = 0;
@@ -309,30 +342,34 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 
                         if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                    size, align)) {
-                                spin_unlock(&sa_manager->lock);
+                                spin_unlock(&sa_manager->wq.lock);
                                 return 0;
                         }
 
                         /* see if we can skip over some allocations */
                 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
-                if (block) {
-                        spin_unlock(&sa_manager->lock);
-                        r = radeon_fence_wait_any(rdev, fences, false);
-                        spin_lock(&sa_manager->lock);
-                        if (r) {
-                                /* if we have nothing to wait for we
-                                   are practically out of memory */
-                                if (r == -ENOENT) {
-                                        r = -ENOMEM;
-                                }
-                                goto out_err;
-                        }
+                if (!block) {
+                        break;
                 }
+
+                spin_unlock(&sa_manager->wq.lock);
+                r = radeon_fence_wait_any(rdev, fences, false);
+                spin_lock(&sa_manager->wq.lock);
+                /* if we have nothing to wait for block */
+                if (r == -ENOENT) {
+                        r = wait_event_interruptible_locked(
+                                sa_manager->wq,
+                                radeon_sa_event(sa_manager, size, align)
+                        );
+                }
+                if (r) {
+                        goto out_err;
+                }
-        } while (block);
+        };
 
 out_err:
-        spin_unlock(&sa_manager->lock);
+        spin_unlock(&sa_manager->wq.lock);
         kfree(*sa_bo);
         *sa_bo = NULL;
         return r;
@@ -348,15 +385,16 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
         }
 
         sa_manager = (*sa_bo)->manager;
-        spin_lock(&sa_manager->lock);
+        spin_lock(&sa_manager->wq.lock);
         if (fence && !radeon_fence_signaled(fence)) {
                 (*sa_bo)->fence = radeon_fence_ref(fence);
                 list_add_tail(&(*sa_bo)->flist,
                               &sa_manager->flist[fence->ring]);
         } else {
                 radeon_sa_bo_remove_locked(*sa_bo);
         }
-        spin_unlock(&sa_manager->lock);
+        wake_up_all_locked(&sa_manager->wq);
+        spin_unlock(&sa_manager->wq.lock);
         *sa_bo = NULL;
 }
 
@@ -366,7 +404,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 {
         struct radeon_sa_bo *i;
 
-        spin_lock(&sa_manager->lock);
+        spin_lock(&sa_manager->wq.lock);
         list_for_each_entry(i, &sa_manager->olist, olist) {
                 if (&i->olist == sa_manager->hole) {
                         seq_printf(m, ">");
@@ -381,6 +419,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
                 }
                 seq_printf(m, "\n");
         }
-        spin_unlock(&sa_manager->lock);
+        spin_unlock(&sa_manager->wq.lock);
 }
 #endif
