diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 7c2650cfb070f..b23368529a409 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->mmo.lock);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_release_mmap_gtt(obj);
-	i915_gem_object_release_mmap_offset(obj);
-}
-
 static struct i915_mmap_offset *
 lookup_mmo(struct drm_i915_gem_object *obj,
 	   enum i915_mmap_type mmap_type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 7c5ccdf593594..efee9e0d25086 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -24,8 +24,6 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      u32 handle, u64 *offset);
 
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 6b69191c55436..eb35bdd10c096 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 	atomic_dec(&i915->mm.free_count);
 }
 
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+	/* Skip serialisation and waking the device if known to be not used. */
+
+	if (obj->userfault_count)
+		i915_gem_object_release_mmap_gtt(obj);
+
+	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+		struct i915_mmap_offset *mmo, *mn;
+
+		i915_gem_object_release_mmap_offset(obj);
+
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+		}
+		obj->mmo.offsets = RB_ROOT;
+	}
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
 	struct drm_i915_gem_object *obj, *on;
 
 	llist_for_each_entry_safe(obj, on, freed, freed) {
-		struct i915_mmap_offset *mmo, *mn;
-
 		trace_i915_gem_object_destroy(obj);
 
 		if (!list_empty(&obj->vma.list)) {
@@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock(&obj->vma.lock);
 		}
 
-		i915_gem_object_release_mmap(obj);
-
-		rbtree_postorder_for_each_entry_safe(mmo, mn,
-						     &obj->mmo.offsets,
-						     offset) {
-			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
-					      &mmo->vma_node);
-			kfree(mmo);
-		}
-		obj->mmo.offsets = RB_ROOT;
+		__i915_gem_object_free_mmaps(obj);
 
-		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 		atomic_set(&obj->mm.pages_pin_count, 0);