Skip to content

Commit

Permalink
drm/i915: Move object backing storage manipulation to its own locking
Browse files Browse the repository at this point in the history
Break the allocation of the backing storage away from struct_mutex into
a per-object lock. This allows parallel page allocation, provided we can
do so outside of struct_mutex (i.e. set-domain-ioctl, pwrite, GTT
fault), i.e. before execbuf! The increased cost of the atomic counters
is hidden behind i915_vma_pin() for the typical case of execbuf, i.e.
as the object is typically bound between execbufs, the page_pin_count is
static. The cost will be felt around set-domain and pwrite, but offset
by the improvement from reduced struct_mutex contention.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-14-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Oct 28, 2016
1 parent 03ac84f commit 1233e2d
Show file tree
Hide file tree
Showing 5 changed files with 114 additions and 78 deletions.
36 changes: 12 additions & 24 deletions drivers/gpu/drm/i915/i915_drv.h
Original file line number Diff line number Diff line change
Expand Up @@ -2274,7 +2274,8 @@ struct drm_i915_gem_object {
unsigned int pin_display;

struct {
unsigned int pages_pin_count;
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;

struct sg_table *pages;
void *mapping;
Expand Down Expand Up @@ -2387,13 +2388,6 @@ i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
return atomic_read(&obj->base.refcount.refcount) == 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)
#define lockdep_assert_held_unless(lock, cond) \
GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
#else
#define lockdep_assert_held_unless(lock, cond)
#endif

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
Expand Down Expand Up @@ -3229,9 +3223,9 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
might_lock(&obj->mm.lock);

if (obj->mm.pages_pin_count++)
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;

return __i915_gem_object_get_pages(obj);
Expand All @@ -3240,32 +3234,29 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
i915_gem_object_is_dead(obj));
GEM_BUG_ON(!obj->mm.pages);

obj->mm.pages_pin_count++;
atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
return obj->mm.pages_pin_count;
return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
i915_gem_object_is_dead(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages);

obj->mm.pages_pin_count--;
GEM_BUG_ON(obj->mm.pages_pin_count < obj->bind_count);
atomic_dec(&obj->mm.pages_pin_count);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
__i915_gem_object_unpin_pages(obj);
}
Expand All @@ -3288,8 +3279,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
* The caller must hold the struct_mutex, and is responsible for calling
* i915_gem_object_unpin_map() when the mapping is no longer required.
* The caller is responsible for calling i915_gem_object_unpin_map() when the
* mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
Expand All @@ -3305,12 +3296,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
*
* The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}

Expand Down
93 changes: 62 additions & 31 deletions drivers/gpu/drm/i915/i915_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -2269,6 +2269,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;

lockdep_assert_held(&obj->mm.lock);
GEM_BUG_ON(obj->mm.pages);

switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
Expand Down Expand Up @@ -2325,12 +2328,17 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;

lockdep_assert_held(&obj->base.dev->struct_mutex);

if (i915_gem_object_has_pinned_pages(obj))
return;

GEM_BUG_ON(obj->bind_count);
if (!READ_ONCE(obj->mm.pages))
return;

/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, SINGLE_DEPTH_NESTING);
if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
goto unlock;

/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
Expand All @@ -2353,6 +2361,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);

obj->ops->put_pages(obj, pages);
unlock:
mutex_unlock(&obj->mm.lock);
}

static unsigned int swiotlb_max_size(void)
Expand Down Expand Up @@ -2486,7 +2496,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
lockdep_assert_held(&obj->mm.lock);

obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
Expand All @@ -2512,25 +2522,33 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}

/* Ensure that the associated pages are gathered from the backing storage
* and pinned into our object. i915_gem_object_get_pages() may be called
* and pinned into our object. i915_gem_object_pin_pages() may be called
* multiple times before they are released by a single call to
* i915_gem_object_put_pages() - once the pages are no longer referenced
* i915_gem_object_unpin_pages() - once the pages are no longer referenced
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;

lockdep_assert_held(&obj->base.dev->struct_mutex);
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
return err;

if (obj->mm.pages)
return 0;
if (likely(obj->mm.pages)) {
__i915_gem_object_pin_pages(obj);
goto unlock;
}

GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

err = ____i915_gem_object_get_pages(obj);
if (err)
__i915_gem_object_unpin_pages(obj);
if (!err)
atomic_set_release(&obj->mm.pages_pin_count, 1);

unlock:
mutex_unlock(&obj->mm.lock);
return err;
}

Expand Down Expand Up @@ -2590,20 +2608,29 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;

lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

ret = i915_gem_object_pin_pages(obj);
ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);

pinned = obj->mm.pages_pin_count > 1;
pinned = true;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;

GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count));
atomic_set_release(&obj->mm.pages_pin_count, 1);
pinned = false;
}
GEM_BUG_ON(!obj->mm.pages);

ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
goto err;
goto err_unpin;
}

if (is_vmalloc_addr(ptr))
Expand All @@ -2618,17 +2645,21 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
goto err;
goto err_unpin;
}

obj->mm.mapping = ptr_pack_bits(ptr, type);
}

out_unlock:
mutex_unlock(&obj->mm.lock);
return ptr;

err:
i915_gem_object_unpin_pages(obj);
return ERR_PTR(ret);
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
ptr = ERR_PTR(ret);
goto out_unlock;
}

static void
Expand Down Expand Up @@ -4268,7 +4299,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
int err;

switch (args->madv) {
case I915_MADV_DONTNEED:
Expand All @@ -4278,15 +4309,13 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;

obj = i915_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
goto unlock;
}
if (!obj)
return -ENOENT;

err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
goto out;

if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
Expand All @@ -4305,18 +4334,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_truncate(obj);

args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);

out:
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
return err;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
int i;

mutex_init(&obj->mm.lock);

INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->userfault_link);
for (i = 0; i < I915_NUM_ENGINES; i++)
Expand Down Expand Up @@ -4479,7 +4510,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
obj->ops->release(obj);

if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
obj->mm.pages_pin_count = 0;
atomic_set(&obj->mm.pages_pin_count, 0);
if (discard_backing_storage(obj))
obj->mm.madv = I915_MADV_DONTNEED;
__i915_gem_object_put_pages(obj);
Expand Down
Loading

0 comments on commit 1233e2d

Please sign in to comment.