Skip to content

Commit

Permalink
drm/i915/gem: Only revoke mmap handlers if active
Browse files Browse the repository at this point in the history
Avoid waking up the device and taking stale locks if we know that the
object is not currently mmapped. This is particularly useful as not many
objects are actually mmapped and so we can destroy them without waking
the device up, and gives us a little more freedom of workqueue ordering
during shutdown.

v2: Pull the release_mmap() into its single user in freeing the objects,
where there can not be any race with a concurrent user of the freed
object. Or so one hopes!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200702163623.6402-2-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Jul 2, 2020
1 parent cb2baf4 commit db83378
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 28 deletions.
13 changes: 0 additions & 13 deletions drivers/gpu/drm/i915/gem/i915_gem_mman.c
Original file line number Diff line number Diff line change
Expand Up @@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_unlock(&obj->mmo.lock);
}

/**
 * i915_gem_object_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * Revokes the GTT mmapping and then the fake mmap offsets, in that
 * order. NOTE(review): per the commit message, this unconditional
 * revoke may wake the device and take locks even when the object was
 * never mmapped — hence its single caller inlines a conditional
 * version instead.
 */
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
i915_gem_object_release_mmap_gtt(obj);
i915_gem_object_release_mmap_offset(obj);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type)
Expand Down
2 changes: 0 additions & 2 deletions drivers/gpu/drm/i915/gem/i915_gem_mman.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,6 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
struct drm_device *dev,
u32 handle, u64 *offset);

void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);

Expand Down
37 changes: 24 additions & 13 deletions drivers/gpu/drm/i915/gem/i915_gem_object.c
Original file line number Diff line number Diff line change
Expand Up @@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
atomic_dec(&i915->mm.free_count);
}

/*
 * Tear down any mmap state still attached to @obj as it is freed.
 * Skip serialisation and waking the device if known to be not used:
 * only revoke the GTT mapping when a fault has been recorded, and only
 * walk the offset tree when at least one fake offset exists.
 */
static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (RB_EMPTY_ROOT(&obj->mmo.offsets))
		return;

	i915_gem_object_release_mmap_offset(obj);

	/* Free every registered fake offset and drop its vma node. */
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
				      &mmo->vma_node);
		kfree(mmo);
	}
	obj->mmo.offsets = RB_ROOT;
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;

llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_mmap_offset *mmo, *mn;

trace_i915_gem_object_destroy(obj);

if (!list_empty(&obj->vma.list)) {
Expand All @@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock(&obj->vma.lock);
}

i915_gem_object_release_mmap(obj);

rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets,
offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
obj->mmo.offsets = RB_ROOT;
__i915_gem_object_free_mmaps(obj);

GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));

atomic_set(&obj->mm.pages_pin_count, 0);
Expand Down

0 comments on commit db83378

Please sign in to comment.