Commit 5ce37bc

---
r: 218346
b: refs/heads/master
c: e3ce8a0
h: refs/heads/master
v: v3

Dave Airlie committed Oct 25, 2010
1 parent 8332549 commit 5ce37bc
Showing 4 changed files with 114 additions and 103 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 641934069d29211baf82afb93622a426172b67b6
+refs/heads/master: e3ce8a0b277438591844847ac7c89a980b4cfa6d
203 changes: 113 additions & 90 deletions trunk/drivers/gpu/drm/ttm/ttm_bo.c
@@ -434,132 +434,144 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 }
 
 /**
- * Call bo::reserved and with the lru lock held.
+ * Call bo::reserved.
  * Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
  */
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_global *glob = bo->glob;
-
 	if (bo->ttm) {
-
-		/**
-		 * Release the lru_lock, since we don't want to have
-		 * an atomic requirement on ttm_tt[unbind|destroy].
-		 */
-
-		spin_unlock(&glob->lru_lock);
 		ttm_tt_unbind(bo->ttm);
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
-		spin_lock(&glob->lru_lock);
 	}
 
-	ttm_bo_mem_put_locked(bo, &bo->mem);
+	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&glob->lru_lock);
 }
 
 /**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
  * up the list_kref and schedule a delayed list check.
  */
 
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_driver *driver;
+	void *sync_obj;
+	void *sync_obj_arg;
+	int put_count;
 	int ret;
 
 	spin_lock(&bo->lock);
-retry:
-	(void) ttm_bo_wait(bo, false, false, !remove_all);
-
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
-		int put_count;
 
-		spin_unlock(&bo->lock);
-
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
 		/**
-		 * Someone else has the object reserved. Bail and retry.
+		 * Lock inversion between bo::reserve and bo::lock here,
+		 * but that's OK, since we're only trylocking.
 		 */
 
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			goto requeue;
-		}
-
-		/**
-		 * We can re-check for sync object without taking
-		 * the bo::lock since setting the sync object requires
-		 * also bo::reserved. A busy object at this point may
-		 * be caused by another thread starting an accelerated
-		 * eviction.
-		 */
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-		if (unlikely(bo->sync_obj)) {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
-			spin_unlock(&glob->lru_lock);
-			spin_lock(&bo->lock);
-			if (remove_all)
-				goto retry;
-			else
-				goto requeue;
-		}
+		if (unlikely(ret == -EBUSY))
+			goto queue;
 
+		spin_unlock(&bo->lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
-		if (!list_empty(&bo->ddestroy)) {
-			list_del_init(&bo->ddestroy);
-			++put_count;
-		}
-
+		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-		return 0;
+		return;
+	} else {
+		spin_lock(&glob->lru_lock);
 	}
-requeue:
-	spin_lock(&glob->lru_lock);
-	if (list_empty(&bo->ddestroy)) {
-		void *sync_obj = bo->sync_obj;
-		void *sync_obj_arg = bo->sync_obj_arg;
-
-		kref_get(&bo->list_kref);
-		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
+queue:
+	sync_obj = bo->sync_obj;
+	sync_obj_arg = bo->sync_obj_arg;
+	driver = bdev->driver;
 
-		if (sync_obj)
-			driver->sync_obj_flush(sync_obj, sync_obj_arg);
-		schedule_delayed_work(&bdev->wq,
-				      ((HZ / 100) < 1) ? 1 : HZ / 100);
-		ret = 0;
+	kref_get(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bo->lock);
 
-	} else {
-		spin_unlock(&glob->lru_lock);
-		spin_unlock(&bo->lock);
-		ret = -EBUSY;
-	}
+	if (sync_obj)
+		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+	schedule_delayed_work(&bdev->wq,
+			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
 
-	return ret;
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool no_wait_reserve,
+			       bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret = 0;
+
+retry:
+	spin_lock(&bo->lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bo->lock);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	spin_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_locked(bo, interruptible,
+				    no_wait_reserve, false, 0);
+
+	if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	/**
+	 * We can re-check for sync object without taking
+	 * the bo::lock since setting the sync object requires
+	 * also bo::reserved. A busy object at this point may
+	 * be caused by another thread recently starting an accelerated
+	 * eviction.
+	 */
+
+	if (unlikely(bo->sync_obj)) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+		spin_unlock(&glob->lru_lock);
+		goto retry;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	spin_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+	return 0;
 }
 
 /**
@@ -591,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(entry, remove_all);
+		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+					  !remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;

@@ -634,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
-	ttm_bo_cleanup_refs(bo, false);
+	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
 }
@@ -742,6 +755,18 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_bo_cleanup_refs(bo, interruptible,
+					  no_wait_reserve, no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+
+		if (likely(ret == 0 || ret == -ERESTARTSYS))
+			return ret;
+
+		goto retry;
+	}
+
 	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
@@ -784,15 +809,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
-void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
-	if (mem->mm_node)
-		(*man->func->put_node_locked)(man, mem);
-}
-EXPORT_SYMBOL(ttm_bo_mem_put_locked);
-
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -1774,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 			struct ttm_buffer_object, swap);
 	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		spin_unlock(&glob->lru_lock);
+		(void) ttm_bo_cleanup_refs(bo, false, false, false);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		continue;
+	}
+
 	/**
 	 * Reserve buffer. Since we unlock while sleeping, we need
 	 * to re-check that nobody removed us from the swap-list while
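The interesting move in the new ttm_bo_cleanup_refs_or_queue() above is that it takes bo::reserved with a trylock while already holding bo::lock, the reverse of the usual ordering; because a trylock never sleeps, the inversion cannot deadlock, and on -EBUSY the object is simply queued on the ddestroy list for the delayed worker. A minimal user-space sketch of that pattern, with pthread mutexes standing in for the kernel locks (names here are illustrative, not TTM's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays bo::lock */
static pthread_mutex_t reserve = PTHREAD_MUTEX_INITIALIZER;	/* plays bo::reserved */

/* Everywhere else the order is: take "reserve" first, then "obj_lock". */
static bool try_cleanup(void)
{
	bool cleaned = false;

	pthread_mutex_lock(&obj_lock);
	/*
	 * Taking "reserve" here inverts the usual lock order, but a
	 * trylock never blocks, so the inversion cannot deadlock; on
	 * failure the caller queues the object for later cleanup.
	 */
	if (pthread_mutex_trylock(&reserve) == 0) {
		/* ... release memory-type usage here ... */
		cleaned = true;
		pthread_mutex_unlock(&reserve);
	}
	pthread_mutex_unlock(&obj_lock);
	return cleaned;
}

int main(void)
{
	printf("%s\n", try_cleanup() ? "cleaned" : "queued");
	return 0;
}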
10 changes: 0 additions & 10 deletions trunk/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -90,15 +90,6 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 	}
 }
 
-static void ttm_bo_man_put_node_locked(struct ttm_mem_type_manager *man,
-				       struct ttm_mem_reg *mem)
-{
-	if (mem->mm_node) {
-		drm_mm_put_block(mem->mm_node);
-		mem->mm_node = NULL;
-	}
-}
-
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
@@ -152,7 +143,6 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
 	ttm_bo_man_takedown,
 	ttm_bo_man_get_node,
 	ttm_bo_man_put_node,
-	ttm_bo_man_put_node_locked,
 	ttm_bo_man_debug
 };
 EXPORT_SYMBOL(ttm_bo_manager_func);
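With cleanup now running outside the lru lock, put_node_locked has no caller left, which is why both the ttm_bo_manager_func table above and the ttm_mem_type_manager_func interface below lose the hook. A generic, compilable sketch of the resulting shape of such an ops table (types and names invented for illustration, not the TTM ones):

#include <stdio.h>

struct mem_node { int allocated; };

struct manager_funcs {
	/*
	 * Before: two hooks, put_node and put_node_locked, chosen by
	 * whether the caller held the list lock. After: callers drop
	 * the lock first, so a single unlocked hook suffices.
	 */
	void (*put_node)(struct mem_node *node);
};

static void man_put_node(struct mem_node *node)
{
	if (node->allocated) {
		node->allocated = 0;
		printf("node returned to the manager\n");
	}
}

static const struct manager_funcs manager_funcs = {
	.put_node = man_put_node,
};

int main(void)
{
	struct mem_node n = { .allocated = 1 };
	manager_funcs.put_node(&n);
	return 0;
}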
2 changes: 0 additions & 2 deletions trunk/include/drm/ttm/ttm_bo_driver.h
@@ -214,8 +214,6 @@ struct ttm_mem_type_manager_func {
 		     struct ttm_mem_reg *mem);
 	void (*put_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_mem_reg *mem);
-	void (*put_node_locked)(struct ttm_mem_type_manager *man,
-				struct ttm_mem_reg *mem);
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };

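Both queuing paths re-arm the delayed-destroy worker with a delay of HZ / 100 jiffies, clamped so that kernels built with HZ below 100 still sleep for at least one jiffy rather than zero. A standalone check of that clamp (the HZ values are just examples; real kernels fix HZ at build time):

#include <stdio.h>

static unsigned long delay_jiffies(unsigned long hz)
{
	/* Same expression as in the diff: ~10 ms, but never 0 jiffies. */
	return ((hz / 100) < 1) ? 1 : hz / 100;
}

int main(void)
{
	unsigned long hz[] = { 24, 100, 250, 1000 };

	for (int i = 0; i < 4; i++)
		printf("HZ=%lu -> %lu jiffies\n", hz[i], delay_jiffies(hz[i]));
	return 0;
}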
