drm/ttm: flip the switch, and convert to dma_fence
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Maarten Lankhorst committed Sep 2, 2014
1 parent 2f453ed commit f2c24b8
Showing 25 changed files with 197 additions and 386 deletions.
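
Every diff below follows the same pattern: the driver's opaque sync_obj callbacks are dropped from its ttm_bo_driver, and buffer idleness is instead tracked through the exclusive fence on the buffer's reservation object. The following is a minimal, self-contained user-space sketch of that before/after shape; the types and helpers here (fence, reservation_object, bo_is_idle, and so on) are illustrative stand-ins, not the kernel's actual definitions.

/* Minimal user-space sketch of the conversion pattern; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

/* Before: TTM called back into each driver through opaque sync_obj hooks. */
struct old_bo_driver_ops {
        bool (*sync_obj_signaled)(void *sync_obj);
        int  (*sync_obj_wait)(void *sync_obj, bool lazy, bool intr);
        void *(*sync_obj_ref)(void *sync_obj);
        void (*sync_obj_unref)(void **sync_obj);
};

/* After: one shared fence type (stand-in for struct fence / dma_fence). */
struct fence {
        bool signaled;
};

/* Stand-in for struct reservation_object: holds the exclusive fence. */
struct reservation_object {
        struct fence *excl;
};

struct buffer_object {
        struct reservation_object *resv;
};

/* Driver side: publish the fence for the last write on the buffer. */
static void bo_attach_excl_fence(struct buffer_object *bo, struct fence *f)
{
        bo->resv->excl = f;
}

/* Core side: no driver callback needed, just inspect the common fence. */
static bool bo_is_idle(const struct buffer_object *bo)
{
        const struct fence *f = bo->resv->excl;

        return !f || f->signaled;
}

int main(void)
{
        struct reservation_object resv = { .excl = NULL };
        struct buffer_object bo = { .resv = &resv };
        struct fence f = { .signaled = false };

        bo_attach_excl_fence(&bo, &f);
        printf("idle after attach: %d\n", bo_is_idle(&bo));    /* 0 */
        f.signaled = true;
        printf("idle after signal: %d\n", bo_is_idle(&bo));    /* 1 */
        return 0;
}
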
48 changes: 6 additions & 42 deletions drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -88,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-                        struct nouveau_fence *fence)
+                        struct fence *fence)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
 
         if (tile) {
                 spin_lock(&drm->tile.lock);
-                tile->fence = nouveau_fence_ref(fence);
+                tile->fence = nouveau_fence_ref((struct nouveau_fence *)fence);
                 tile->used = false;
                 spin_unlock(&drm->tile.lock);
         }
@@ -976,7 +976,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         if (ret == 0) {
                 ret = nouveau_fence_new(chan, false, &fence);
                 if (ret == 0) {
-                        ret = ttm_bo_move_accel_cleanup(bo, fence,
+                        ret = ttm_bo_move_accel_cleanup(bo,
+                                                        &fence->base,
                                                         evict,
                                                         no_wait_gpu,
                                                         new_mem);
@@ -1167,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct drm_device *dev = drm->dev;
+        struct fence *fence = reservation_object_get_excl(bo->resv);
 
-        nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+        nv10_bo_put_tile_region(dev, *old_tile, fence);
         *old_tile = new_tile;
 }
 
@@ -1455,47 +1457,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         ttm_pool_unpopulate(ttm);
 }
 
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
-        nouveau_fence_unref((struct nouveau_fence **)sync_obj);
-}
-
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
         struct reservation_object *resv = nvbo->bo.resv;
 
-        nouveau_bo_fence_unref(&nvbo->bo.sync_obj);
-        nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-
         reservation_object_add_excl_fence(resv, &fence->base);
 }
 
-static void *
-nouveau_bo_fence_ref(void *sync_obj)
-{
-        return nouveau_fence_ref(sync_obj);
-}
-
-static bool
-nouveau_bo_fence_signalled(void *sync_obj)
-{
-        return nouveau_fence_done(sync_obj);
-}
-
-static int
-nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
-{
-        return nouveau_fence_wait(sync_obj, lazy, intr);
-}
-
-static int
-nouveau_bo_fence_flush(void *sync_obj)
-{
-        return 0;
-}
-
 struct ttm_bo_driver nouveau_bo_driver = {
         .ttm_tt_create = &nouveau_ttm_tt_create,
         .ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1506,11 +1475,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
         .move_notify = nouveau_bo_move_ntfy,
         .move = nouveau_bo_move,
         .verify_access = nouveau_bo_verify_access,
-        .sync_obj_signaled = nouveau_bo_fence_signalled,
-        .sync_obj_wait = nouveau_bo_fence_wait,
-        .sync_obj_flush = nouveau_bo_fence_flush,
-        .sync_obj_unref = nouveau_bo_fence_unref,
-        .sync_obj_ref = nouveau_bo_fence_ref,
         .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
         .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
         .io_mem_free = &nouveau_ttm_io_mem_free,
24 changes: 8 additions & 16 deletions drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -185,25 +185,26 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 void
-nouveau_fence_work(struct nouveau_fence *fence,
+nouveau_fence_work(struct fence *fence,
                    void (*func)(void *), void *data)
 {
         struct nouveau_fence_work *work;
 
-        if (fence_is_signaled(&fence->base))
+        if (fence_is_signaled(fence))
                 goto err;
 
         work = kmalloc(sizeof(*work), GFP_KERNEL);
         if (!work) {
-                WARN_ON(nouveau_fence_wait(fence, false, false));
+                WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
+                                           false, false));
                 goto err;
         }
 
         INIT_WORK(&work->work, nouveau_fence_work_handler);
         work->func = func;
         work->data = data;
 
-        if (fence_add_callback(&fence->base, &work->cb, nouveau_fence_work_cb) < 0)
+        if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
                 goto err_free;
         return;
 
@@ -349,14 +350,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
         struct reservation_object_list *fobj;
         int ret = 0, i;
 
-        fence = nvbo->bo.sync_obj;
-        if (fence && fence_is_signaled(fence)) {
-                nouveau_fence_unref((struct nouveau_fence **)
-                                    &nvbo->bo.sync_obj);
-                fence = NULL;
-        }
+        fence = reservation_object_get_excl(resv);
 
-        if (fence) {
+        if (fence && !fence_is_signaled(fence)) {
                 struct nouveau_fence *f = from_fence(fence);
                 struct nouveau_channel *prev = f->channel;
 
@@ -370,12 +366,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
         if (ret)
                 return ret;
 
-        fence = reservation_object_get_excl(resv);
-        if (fence && !nouveau_local_fence(fence, chan->drm))
-                ret = fence_wait(fence, true);
-
         fobj = reservation_object_get_list(resv);
-        if (!fobj || ret)
+        if (!fobj)
                 return ret;
 
         for (i = 0; i < fobj->shared_count && !ret; ++i) {
2 changes: 1 addition & 1 deletion drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -26,7 +26,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct fence *, void (*)(void *), void *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *);
 
16 changes: 9 additions & 7 deletions drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -98,13 +98,12 @@ static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
         const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-        struct nouveau_fence *fence = NULL;
+        struct fence *fence = NULL;
 
         list_del(&vma->head);
 
-        if (mapped) {
-                fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-        }
+        if (mapped)
+                fence = reservation_object_get_excl(nvbo->bo.resv);
 
         if (fence) {
                 nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
@@ -114,7 +113,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
                 nouveau_vm_put(vma);
                 kfree(vma);
         }
-        nouveau_fence_unref(&fence);
 }
 
 void
@@ -874,8 +872,12 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
         ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
         if (!ret) {
                 ret = ttm_bo_wait(&nvbo->bo, true, true, true);
-                if (!no_wait && ret)
-                        fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+                if (!no_wait && ret) {
+                        struct fence *excl;
+
+                        excl = reservation_object_get_excl(nvbo->bo.resv);
+                        fence = nouveau_fence_ref((struct nouveau_fence *)excl);
+                }
 
                 ttm_bo_unreserve(&nvbo->bo);
         }
6 changes: 3 additions & 3 deletions drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -67,9 +67,9 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
                 rel = fobj ? fobj->shared_count : 0;
                 rcu_read_unlock();
 
-                seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
-                           (unsigned long)bo->gem_base.size, bo->pin_count,
-                           bo->tbo.sync_obj, rel);
+                seq_printf(m, "size %ld, pc %d, num releases %d\n",
+                           (unsigned long)bo->gem_base.size,
+                           bo->pin_count, rel);
         }
         spin_unlock(&qdev->release_lock);
         return 0;
2 changes: 0 additions & 2 deletions drivers/gpu/drm/qxl/qxl_drv.h
@@ -280,9 +280,7 @@ struct qxl_device {
         uint8_t slot_gen_bits;
         uint64_t va_slot_mask;
 
-        /* XXX: when rcu becomes available, release_lock can be killed */
         spinlock_t release_lock;
-        spinlock_t fence_lock;
         struct idr release_idr;
         uint32_t release_seqno;
         spinlock_t release_idr_lock;
1 change: 0 additions & 1 deletion drivers/gpu/drm/qxl/qxl_kms.c
@@ -224,7 +224,6 @@ static int qxl_device_init(struct qxl_device *qdev,
         idr_init(&qdev->release_idr);
         spin_lock_init(&qdev->release_idr_lock);
         spin_lock_init(&qdev->release_lock);
-        spin_lock_init(&qdev->fence_lock);
 
         idr_init(&qdev->surf_id_idr);
         spin_lock_init(&qdev->surf_id_idr_lock);
4 changes: 2 additions & 2 deletions drivers/gpu/drm/qxl/qxl_object.h
@@ -78,8 +78,8 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
         }
         if (mem_type)
                 *mem_type = bo->tbo.mem.mem_type;
-        if (bo->tbo.sync_obj)
-                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+
+        r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
         ttm_bo_unreserve(&bo->tbo);
         return r;
 }
3 changes: 0 additions & 3 deletions drivers/gpu/drm/qxl/qxl_release.c
@@ -464,9 +464,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                 bo = entry->bo;
                 qbo = to_qxl_bo(bo);
 
-                if (!entry->bo->sync_obj)
-                        entry->bo->sync_obj = qbo;
-
                 reservation_object_add_shared_fence(bo->resv, &release->base);
                 ttm_bo_add_to_lru(bo);
                 __ttm_bo_unreserve(bo);
104 changes: 0 additions & 104 deletions drivers/gpu/drm/qxl/qxl_ttm.c
@@ -357,105 +357,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
         return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 }
 
-static bool qxl_sync_obj_signaled(void *sync_obj);
-
-static int qxl_sync_obj_wait(void *sync_obj,
-                             bool lazy, bool interruptible)
-{
-        struct qxl_bo *bo = (struct qxl_bo *)sync_obj;
-        struct qxl_device *qdev = bo->gem_base.dev->dev_private;
-        struct reservation_object_list *fobj;
-        int count = 0, sc = 0, num_release = 0;
-        bool have_drawable_releases;
-
-retry:
-        if (sc == 0) {
-                if (bo->type == QXL_GEM_DOMAIN_SURFACE)
-                        qxl_update_surface(qdev, bo);
-        } else if (sc >= 1) {
-                qxl_io_notify_oom(qdev);
-        }
-
-        sc++;
-
-        for (count = 0; count < 10; count++) {
-                if (qxl_sync_obj_signaled(sync_obj))
-                        return 0;
-
-                if (!qxl_queue_garbage_collect(qdev, true))
-                        break;
-        }
-
-        have_drawable_releases = false;
-        num_release = 0;
-
-        spin_lock(&qdev->release_lock);
-        fobj = bo->tbo.resv->fence;
-        for (count = 0; fobj && count < fobj->shared_count; count++) {
-                struct qxl_release *release;
-
-                release = container_of(fobj->shared[count],
-                                       struct qxl_release, base);
-
-                if (fence_is_signaled(&release->base))
-                        continue;
-
-                num_release++;
-
-                if (release->type == QXL_RELEASE_DRAWABLE)
-                        have_drawable_releases = true;
-        }
-        spin_unlock(&qdev->release_lock);
-
-        qxl_queue_garbage_collect(qdev, true);
-
-        if (have_drawable_releases || sc < 4) {
-                if (sc > 2)
-                        /* back off */
-                        usleep_range(500, 1000);
-                if (have_drawable_releases && sc > 300) {
-                        WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, num_release);
-                        return -EBUSY;
-                }
-                goto retry;
-        }
-        return 0;
-}
-
-static int qxl_sync_obj_flush(void *sync_obj)
-{
-        return 0;
-}
-
-static void qxl_sync_obj_unref(void **sync_obj)
-{
-        *sync_obj = NULL;
-}
-
-static void *qxl_sync_obj_ref(void *sync_obj)
-{
-        return sync_obj;
-}
-
-static bool qxl_sync_obj_signaled(void *sync_obj)
-{
-        struct qxl_bo *qbo = (struct qxl_bo *)sync_obj;
-        struct qxl_device *qdev = qbo->gem_base.dev->dev_private;
-        struct reservation_object_list *fobj;
-        bool ret = true;
-        unsigned i;
-
-        spin_lock(&qdev->release_lock);
-        fobj = qbo->tbo.resv->fence;
-        for (i = 0; fobj && i < fobj->shared_count; ++i) {
-                ret = fence_is_signaled(fobj->shared[i]);
-                if (!ret)
-                        break;
-        }
-        spin_unlock(&qdev->release_lock);
-        return ret;
-}
-
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
                                struct ttm_mem_reg *new_mem)
 {
@@ -482,11 +383,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
         .verify_access = &qxl_verify_access,
         .io_mem_reserve = &qxl_ttm_io_mem_reserve,
         .io_mem_free = &qxl_ttm_io_mem_free,
-        .sync_obj_signaled = &qxl_sync_obj_signaled,
-        .sync_obj_wait = &qxl_sync_obj_wait,
-        .sync_obj_flush = &qxl_sync_obj_flush,
-        .sync_obj_unref = &qxl_sync_obj_unref,
-        .sync_obj_ref = &qxl_sync_obj_ref,
         .move_notify = &qxl_bo_move_notify,
 };
 
[diffs for the remaining changed files not shown]
