drm/i915: Refactor shmem pread setup
The command parser is going to need the same synchronization and
setup logic, so factor it out for reuse.

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Brad Volkin authored and Daniel Vetter committed Mar 7, 2014
1 parent 922044c commit 4c914c0
Showing 2 changed files with 40 additions and 14 deletions.
3 changes: 3 additions & 0 deletions drivers/gpu/drm/i915/i915_drv.h
@@ -2140,6 +2140,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
51 changes: 37 additions & 14 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -327,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -424,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
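For context, the new helper pins the object's backing pages on success, so a caller is expected to balance it with i915_gem_object_unpin_pages() once it has finished reading — as the reworked i915_gem_shmem_pread() already does further down. A minimal sketch of the usage pattern the refactor enables, e.g. for the upcoming command parser; example_read_shmem_obj and its body are illustrative only and not part of this commit:

/* Hypothetical caller sketch. Locking (struct_mutex) and the actual
 * page reads are elided; the point is the prepare/unpin pairing. */
static int example_read_shmem_obj(struct drm_i915_gem_object *obj)
{
	int needs_clflush = 0;
	int ret;

	/* Waits for pending GPU writes, then gets and pins the pages. */
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... map and read each backing page, flushing cachelines
	 * first (e.g. drm_clflush_virt_range()) if needs_clflush
	 * is non-zero ... */

	/* The helper pinned the pages; the caller must unpin them. */
	i915_gem_object_unpin_pages(obj);

	return 0;
}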
