Skip to content

Commit

Permalink
drm/i915: Simplify userptr locking
Browse files Browse the repository at this point in the history
Use an rwlock instead of a spinlock for the global notifier lock
to reduce the risk of contention in execbuf.

Protect object state with the object lock whenever possible, rather
than with the global notifier lock.

Don't take an explicit page_ref in userptr_submit_init(); instead,
call get_pages() after obtaining the page list so that
get_pages() holds the page_ref. This means we no longer need to call
userptr_submit_fini(); removing that call avoids awkward locking
in our upcoming VM_BIND code.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610143525.624677-1-thomas.hellstrom@linux.intel.com
  • Loading branch information
Thomas Hellström authored and Matthew Auld committed Jun 14, 2021
1 parent 35c6367 commit b4b9731
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 66 deletions.
21 changes: 8 additions & 13 deletions drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -994,7 +994,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}

static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
{
const unsigned int count = eb->buffer_count;
unsigned int i;
Expand All @@ -1008,11 +1008,6 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release

eb_unreserve_vma(ev);

if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
i915_gem_object_userptr_submit_fini(vma->obj);
}

if (final)
i915_vma_put(vma);
}
Expand Down Expand Up @@ -1990,7 +1985,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
}

/* We may process another execbuffer during the unlock... */
eb_release_vmas(eb, false, true);
eb_release_vmas(eb, false);
i915_gem_ww_ctx_fini(&eb->ww);

if (rq) {
Expand Down Expand Up @@ -2094,7 +2089,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,

err:
if (err == -EDEADLK) {
eb_release_vmas(eb, false, false);
eb_release_vmas(eb, false);
err = i915_gem_ww_ctx_backoff(&eb->ww);
if (!err)
goto repeat_validate;
Expand Down Expand Up @@ -2191,7 +2186,7 @@ static int eb_relocate_parse(struct i915_execbuffer *eb)

err:
if (err == -EDEADLK) {
eb_release_vmas(eb, false, false);
eb_release_vmas(eb, false);
err = i915_gem_ww_ctx_backoff(&eb->ww);
if (!err)
goto retry;
Expand Down Expand Up @@ -2268,7 +2263,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)

#ifdef CONFIG_MMU_NOTIFIER
if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
spin_lock(&eb->i915->mm.notifier_lock);
read_lock(&eb->i915->mm.notifier_lock);

/*
* count is always at least 1, otherwise __EXEC_USERPTR_USED
Expand All @@ -2286,7 +2281,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
break;
}

spin_unlock(&eb->i915->mm.notifier_lock);
read_unlock(&eb->i915->mm.notifier_lock);
}
#endif

Expand Down Expand Up @@ -3435,7 +3430,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,

err = eb_lookup_vmas(&eb);
if (err) {
eb_release_vmas(&eb, true, true);
eb_release_vmas(&eb, true);
goto err_engine;
}

Expand Down Expand Up @@ -3528,7 +3523,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
i915_request_put(eb.request);

err_vma:
eb_release_vmas(&eb, true, true);
eb_release_vmas(&eb, true);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
WARN_ON(err == -EDEADLK);
Expand Down
2 changes: 0 additions & 2 deletions drivers/gpu/drm/i915/gem/i915_gem_object.h
Original file line number Diff line number Diff line change
Expand Up @@ -609,14 +609,12 @@ i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif
Expand Down
72 changes: 22 additions & 50 deletions drivers/gpu/drm/i915/gem/i915_gem_userptr.c
Original file line number Diff line number Diff line change
Expand Up @@ -67,11 +67,11 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
if (!mmu_notifier_range_blockable(range))
return false;

spin_lock(&i915->mm.notifier_lock);
write_lock(&i915->mm.notifier_lock);

mmu_interval_set_seq(mni, cur_seq);

spin_unlock(&i915->mm.notifier_lock);
write_unlock(&i915->mm.notifier_lock);

/*
* We don't wait when the process is exiting. This is valid
Expand Down Expand Up @@ -107,16 +107,15 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)

static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct page **pvec = NULL;

spin_lock(&i915->mm.notifier_lock);
assert_object_held_shared(obj);

if (!--obj->userptr.page_ref) {
pvec = obj->userptr.pvec;
obj->userptr.pvec = NULL;
}
GEM_BUG_ON(obj->userptr.page_ref < 0);
spin_unlock(&i915->mm.notifier_lock);

if (pvec) {
const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
Expand All @@ -128,7 +127,6 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
Expand All @@ -141,16 +139,13 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (!st)
return -ENOMEM;

spin_lock(&i915->mm.notifier_lock);
if (GEM_WARN_ON(!obj->userptr.page_ref)) {
spin_unlock(&i915->mm.notifier_lock);
ret = -EFAULT;
if (!obj->userptr.page_ref) {
ret = -EAGAIN;
goto err_free;
}

obj->userptr.page_ref++;
pvec = obj->userptr.pvec;
spin_unlock(&i915->mm.notifier_lock);

alloc_table:
sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
Expand Down Expand Up @@ -241,7 +236,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
i915_gem_object_userptr_drop_ref(obj);
}

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
Expand All @@ -259,15 +254,11 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
if (!IS_ERR_OR_NULL(pages))
i915_gem_userptr_put_pages(obj, pages);

if (get_pages)
err = ____i915_gem_object_get_pages(obj);

return err;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
unsigned int gup_flags = 0;
Expand All @@ -277,39 +268,22 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
if (obj->userptr.notifier.mm != current->mm)
return -EFAULT;

notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;

/* optimistically try to preserve current pages while unlocked */
if (i915_gem_object_has_pages(obj) &&
!mmu_interval_check_retry(&obj->userptr.notifier,
obj->userptr.notifier_seq)) {
spin_lock(&i915->mm.notifier_lock);
if (obj->userptr.pvec &&
!mmu_interval_read_retry(&obj->userptr.notifier,
obj->userptr.notifier_seq)) {
obj->userptr.page_ref++;

/* We can keep using the current binding, this is the fastpath */
ret = 1;
}
spin_unlock(&i915->mm.notifier_lock);
if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
i915_gem_object_unlock(obj);
return 0;
}

if (!ret) {
/* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
ret = i915_gem_object_userptr_unbind(obj, false);
}
ret = i915_gem_object_userptr_unbind(obj);
i915_gem_object_unlock(obj);
if (ret < 0)
if (ret)
return ret;

if (ret > 0)
return 0;

notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pvec)
return -ENOMEM;
Expand All @@ -329,7 +303,9 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
}
ret = 0;

spin_lock(&i915->mm.notifier_lock);
ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
goto out;

if (mmu_interval_read_retry(&obj->userptr.notifier,
!obj->userptr.page_ref ? notifier_seq :
Expand All @@ -341,12 +317,14 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
if (!obj->userptr.page_ref++) {
obj->userptr.pvec = pvec;
obj->userptr.notifier_seq = notifier_seq;

pvec = NULL;
ret = ____i915_gem_object_get_pages(obj);
}

obj->userptr.page_ref--;

out_unlock:
spin_unlock(&i915->mm.notifier_lock);
i915_gem_object_unlock(obj);

out:
if (pvec) {
Expand All @@ -369,11 +347,6 @@ int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
return 0;
}

void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
{
i915_gem_object_userptr_drop_ref(obj);
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
int err;
Expand All @@ -396,7 +369,6 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
i915_gem_object_unlock(obj);
}

i915_gem_object_userptr_submit_fini(obj);
return err;
}

Expand Down Expand Up @@ -572,7 +544,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
spin_lock_init(&dev_priv->mm.notifier_lock);
rwlock_init(&dev_priv->mm.notifier_lock);
#endif

return 0;
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_drv.h
Original file line number Diff line number Diff line change
Expand Up @@ -552,7 +552,7 @@ struct i915_gem_mm {
* notifier_lock for mmu notifiers, memory may not be allocated
* while holding this lock.
*/
spinlock_t notifier_lock;
rwlock_t notifier_lock;
#endif

/* shrinker accounting, also useful for userland debugging */
Expand Down

0 comments on commit b4b9731

Please sign in to comment.