drm/i915: Pull scheduling under standalone lock
Currently, the backend scheduling code abuses struct_mutex in order to
have a global lock to manipulate a temporary list (without widespread
allocation) and to protect against list modifications. This is an
extraneous coupling to struct_mutex and, further, cannot extend beyond
the local device.

Pull all the code that needs to be under the one true lock into
i915_scheduler.c, and make it so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181001144755.7978-2-chris@chris-wilson.co.uk
Chris Wilson committed Oct 1, 2018
1 parent b16c765 commit e2f3496
Showing 8 changed files with 411 additions and 361 deletions.
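To make the change concrete before the file-by-file diff, here is a minimal sketch of the destination pattern: the dependency helpers move into i915_scheduler.c and bracket all list manipulation with a single standalone spinlock instead of relying on struct_mutex. This is an illustration only, assuming the driver-internal headers are already included; the lock name schedule_lock and the exact lock placement are assumptions, not necessarily the verbatim upstream code.

/* Sketch only (drivers/gpu/drm/i915/i915_scheduler.c): a global lock owned
 * by the scheduler itself, replacing struct_mutex for dependency-list
 * manipulation. The name schedule_lock is an assumption for illustration.
 */
static DEFINE_SPINLOCK(schedule_lock);

static void
__i915_sched_node_add_dependency(struct i915_sched_node *node,
                                 struct i915_sched_node *signal,
                                 struct i915_dependency *dep,
                                 unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);

        /* The waiter/signaler lists are only ever touched while holding
         * the standalone scheduler lock, not the device-wide mutex.
         */
        spin_lock(&schedule_lock);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &node->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
        spin_unlock(&schedule_lock);
}

The helpers deleted from i915_request.c below reappear in roughly this shape in the new file, so the dependency bookkeeping no longer needs the device-wide mutex.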
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
          i915_gemfs.o \
          i915_query.o \
          i915_request.o \
          i915_scheduler.o \
          i915_timeline.o \
          i915_trace_points.o \
          i915_vma.o \
85 changes: 0 additions & 85 deletions drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
        spin_unlock(&file_priv->mm.lock);
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_sched_node_add_dependency(struct i915_sched_node *node,
                                 struct i915_sched_node *signal,
                                 struct i915_dependency *dep,
                                 unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &node->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
}

static int
i915_sched_node_add_dependency(struct drm_i915_private *i915,
                               struct i915_sched_node *node,
                               struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        __i915_sched_node_add_dependency(node, signal, dep,
                                         I915_DEPENDENCY_ALLOC);
        return 0;
}

static void
i915_sched_node_fini(struct drm_i915_private *i915,
                     struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        GEM_BUG_ON(!list_empty(&node->link));

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }
}

static void
i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
}

static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
        struct intel_engine_cs *engine;
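For comparison with the i915_sched_node_fini removed above, a hedged sketch of how the relocated teardown in i915_scheduler.c presumably walks the same lists under the standalone lock (again assuming the schedule_lock name; details may differ from the upstream file):

void i915_sched_node_fini(struct drm_i915_private *i915,
                          struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        GEM_BUG_ON(!list_empty(&node->link));

        /* Sketch: the dependency lists are now walked under the standalone
         * scheduler lock instead of struct_mutex.
         */
        spin_lock(&schedule_lock);

        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!list_empty(&dep->dfs_link));
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(!list_empty(&dep->dfs_link));
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        spin_unlock(&schedule_lock);
}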
8 changes: 0 additions & 8 deletions drivers/gpu/drm/i915/i915_request.h
@@ -332,14 +332,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
        return __i915_request_completed(rq, seqno);
}

static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
        const struct i915_request *rq =
                container_of(node, const struct i915_request, sched);

        return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
(The remaining five changed files in this commit are not shown here.)
