drm/i915: Fallback to single page GTT mmappings for relocations
If we cannot pin the entire object into the mappable region of the GTT,
try to pin a single page instead. This is much more likely to succeed,
and prevents us falling back to the clflush slow path.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-14-chris@chris-wilson.co.uk
Chris Wilson committed Aug 18, 2016
1 parent d50415c commit e8cb909
1 file changed: drivers/gpu/drm/i915/i915_gem_execbuffer.c (51 additions, 11 deletions)
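
As an orientation aid before the diff, the control flow this patch gives reloc_iomap() can be condensed as follows. This is only an illustrative sketch assembled from the hunks below; fence handling, domain flushing, and the fast path for an already-allocated node are omitted:

	/* Prefer binding the whole object into the mappable GGTT aperture. */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma)) {
		/* The whole object did not fit: reserve a single page-sized GTT
		 * node instead. Previously we returned NULL here and fell back
		 * to the clflush slow path.
		 */
		memset(&cache->node, 0, sizeof(cache->node));
		ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &cache->node,
							  4096, 0, 0,
							  0, ggtt->mappable_end,
							  DRM_MM_SEARCH_DEFAULT,
							  DRM_MM_CREATE_DEFAULT);
		if (ret)
			return ERR_PTR(ret);
	} else {
		cache->node.start = vma->node.start;
		cache->node.mm = (void *)vma;
	}

	/* With the single-page fallback, rebind the wanted page on demand;
	 * with a full binding, just offset into the object's GTT mapping.
	 */
	offset = cache->node.start;
	if (cache->node.allocated)
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	else
		offset += page << PAGE_SHIFT;

	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
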
@@ -331,6 +331,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->vaddr = 0;
 	cache->i915 = i915;
 	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	cache->node.allocated = false;
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -360,8 +361,19 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
 		wmb();
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		if (cache->node.allocated) {
+			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+			ggtt->base.clear_range(&ggtt->base,
+					       cache->node.start,
+					       cache->node.size,
+					       true);
+			drm_mm_remove_node(&cache->node);
+		} else {
+			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+		}
 	}
 }

@@ -401,8 +413,19 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 int page)
 {
+	struct i915_ggtt *ggtt = &cache->i915->ggtt;
+	unsigned long offset;
 	void *vaddr;
 
+	if (cache->node.allocated) {
+		wmb();
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       cache->node.start, I915_CACHE_NONE, 0);
+		cache->page = page;
+		return unmask_page(cache->vaddr);
+	}
+
 	if (cache->vaddr) {
 		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
 	} else {
@@ -418,21 +441,38 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-		if (IS_ERR(vma))
-			return NULL;
+		if (IS_ERR(vma)) {
+			memset(&cache->node, 0, sizeof(cache->node));
+			ret = drm_mm_insert_node_in_range_generic
+				(&ggtt->base.mm, &cache->node,
+				 4096, 0, 0,
+				 0, ggtt->mappable_end,
+				 DRM_MM_SEARCH_DEFAULT,
+				 DRM_MM_CREATE_DEFAULT);
+			if (ret)
+				return ERR_PTR(ret);
+		} else {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret) {
+				i915_vma_unpin(vma);
+				return ERR_PTR(ret);
+			}
 
-		ret = i915_gem_object_put_fence(obj);
-		if (ret) {
-			i915_vma_unpin(vma);
-			return ERR_PTR(ret);
+			cache->node.start = vma->node.start;
+			cache->node.mm = (void *)vma;
 		}
-
-		cache->node.start = vma->node.start;
-		cache->node.mm = (void *)vma;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable,
-					 cache->node.start + (page << PAGE_SHIFT));
+	offset = cache->node.start;
+	if (cache->node.allocated) {
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       offset, I915_CACHE_NONE, 0);
+	} else {
+		offset += page << PAGE_SHIFT;
+	}
+
+	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
