From d25e8fddbba373cb1fbaf504505de81503d56215 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:57:10 +0100
Subject: [PATCH]

--- yaml ---
r: 358161
b: refs/heads/master
c: f2d476a110bc24fde008698ae9018c99e803e25c
h: refs/heads/master
i:
  358159: 7aaa318fa3ec2a6ac02afb5f79a1b46982b07911
v: v3
---
 [refs]                                       |  2 +-
 trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c | 23 ++++++++++++++++++--
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 0e306de62184..4b7e663a797e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5e45d7dfd74100d622f9cdc70bfd1f9fae1671de
+refs/heads/master: f2d476a110bc24fde008698ae9018c99e803e25c
diff --git a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d323657798..7b90def15674 100644
--- a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
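
The hunks above implement a sequence-stamped deadlock-avoidance retry: try-lock
every buffer in the list with the current sequence number; on contention, back
off everything not yet taken via the slowpath, draw a fresh sequence number,
block on the contended buffer, mark it reserved so later passes skip it, and
restart the scan. What follows is a minimal, compilable sketch of that pattern
in userspace C; struct buf, buf_trylock(), buf_lock_slow(), and backoff() are
hypothetical stand-ins, not the TTM API.

/* deadlock_avoid_sketch.c - illustrative stand-ins only, not real TTM code. */
#include <stdbool.h>
#include <stdio.h>

#define NBUF 3

struct buf {
	unsigned long seq;	/* sequence stamped by whoever reserved us */
	bool locked;
	bool reserved;		/* taken by *us* via the slowpath */
};

static unsigned long global_seq;	/* stands in for bdev->val_seq */

/* Fastpath: fail (think -EAGAIN) if someone else holds the buffer. */
static int buf_trylock(struct buf *b, unsigned long seq)
{
	if (b->locked)
		return -1;
	b->locked = true;
	b->seq = seq;
	return 0;
}

/* Slowpath: the kernel sleeps here; the toy pretends the holder let go. */
static int buf_lock_slow(struct buf *b, unsigned long seq)
{
	b->locked = true;
	b->seq = seq;
	return 0;
}

/* Drop the locks taken this pass, but keep slowpath reservations. */
static void backoff(struct buf *bufs, int upto)
{
	for (int i = 0; i < upto; i++)
		if (!bufs[i].reserved)
			bufs[i].locked = false;
}

int main(void)
{
	/* bufs[1] starts locked, simulating a contending task. */
	struct buf bufs[NBUF] = { { 0 }, { .locked = true }, { 0 } };
	unsigned long val_seq = global_seq++;
	int i;

retry:
	for (i = 0; i < NBUF; i++) {
		if (bufs[i].reserved)	/* already slowpath reserved? */
			continue;
		if (buf_trylock(&bufs[i], val_seq) != 0) {
			backoff(bufs, i);
			/* fresh sequence so our stale stamp on a buffer
			 * someone else now holds can't be mistaken for
			 * a live reservation of ours */
			val_seq = global_seq++;
			buf_lock_slow(&bufs[i], val_seq);
			bufs[i].reserved = true;
			goto retry;
		}
	}
	printf("all %d buffers reserved with seq %lu\n", NBUF, val_seq);
	return 0;
}

Bumping the sequence on every retry is the point the patch's in-line comment
makes: a contender may have reserved the buffer but not yet published its
seq_valid/seqno, so reusing the old sequence risks reading our own stale stamp
as a valid reservation.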