Skip to content

Commit

Permalink
drm/i915: Move request runtime management onto gt
Browse files Browse the repository at this point in the history
Requests are run from the gt and are tidied into the gt runtime power
management, so pull the runtime request management under gt/

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-12-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Oct 4, 2019
1 parent 789ed95 commit 6610197
Show file tree
Hide file tree
Showing 21 changed files with 213 additions and 158 deletions.
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ gt-y += \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
gt/intel_rc6.o \
Expand Down
4 changes: 3 additions & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_mman.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
Expand Down Expand Up @@ -424,14 +425,15 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = &i915->gt;
int err;

err = drm_gem_create_mmap_offset(&obj->base);
if (likely(!err))
return 0;

/* Attempt to reap some mmap space from dead objects */
err = i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT);
err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;

Expand Down
28 changes: 4 additions & 24 deletions drivers/gpu/drm/i915/gem/i915_gem_pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -7,31 +7,18 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"

static void i915_gem_park(struct drm_i915_private *i915)
{
cancel_delayed_work(&i915->gem.retire_work);

i915_vma_parked(i915);

i915_globals_park();
}

static void retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.retire_work.work);

i915_retire_requests(i915);

queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
}

static int pm_notifier(struct notifier_block *nb,
unsigned long action,
void *data)
Expand All @@ -42,9 +29,6 @@ static int pm_notifier(struct notifier_block *nb,
switch (action) {
case INTEL_GT_UNPARK:
i915_globals_unpark();
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
break;

case INTEL_GT_PARK:
Expand All @@ -59,7 +43,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
bool result = !intel_gt_is_wedged(gt);

if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
Expand Down Expand Up @@ -122,14 +106,12 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
switch_to_kernel_context_sync(&i915->gt);
intel_gt_suspend(&i915->gt);
intel_uc_suspend(&i915->gt.uc);

cancel_delayed_work_sync(&i915->gt.hangcheck.work);

i915_gem_drain_freed_objects(i915);

intel_uc_suspend(&i915->gt.uc);
intel_gt_suspend(&i915->gt);
}

static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
Expand Down Expand Up @@ -239,8 +221,6 @@ void i915_gem_resume(struct drm_i915_private *i915)

void i915_gem_init__pm(struct drm_i915_private *i915)
{
INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

i915->gem.pm_notifier.notifier_call = pm_notifier;
blocking_notifier_chain_register(&i915->gt.pm_notifications,
&i915->gem.pm_notifier);
Expand Down
5 changes: 3 additions & 2 deletions drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

Expand Down Expand Up @@ -518,7 +519,7 @@ create_test_object(struct i915_address_space *vm,
int err;

/* Keep in GEM's good graces */
i915_retire_requests(vm->i915);
intel_gt_retire_requests(vm->gt);

size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
Expand Down Expand Up @@ -1136,7 +1137,7 @@ __sseu_finish(const char *name,
igt_spinner_end(spin);

if ((flags & TEST_IDLE) && ret == 0) {
ret = i915_gem_wait_for_idle(ce->engine->i915,
ret = intel_gt_wait_for_idle(ce->engine->gt,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work);
cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
Expand Down
2 changes: 2 additions & 0 deletions drivers/gpu/drm/i915/gt/intel_gt.c
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_uncore.h"
Expand All @@ -23,6 +24,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)

intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
}
Expand Down
5 changes: 4 additions & 1 deletion drivers/gpu/drm/i915/gt/intel_gt_pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_wakeref.h"
Expand Down Expand Up @@ -49,6 +50,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);

intel_gt_queue_hangcheck(gt);
intel_gt_unpark_requests(gt);

pm_notify(gt, INTEL_GT_UNPARK);

Expand All @@ -64,6 +66,7 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_TRACE("\n");

pm_notify(gt, INTEL_GT_PARK);
intel_gt_park_requests(gt);

i915_pmu_gt_parked(i915);
if (INTEL_GEN(i915) >= 6)
Expand Down Expand Up @@ -196,7 +199,7 @@ int intel_gt_resume(struct intel_gt *gt)

static void wait_for_idle(struct intel_gt *gt)
{
if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
Expand Down
123 changes: 123 additions & 0 deletions drivers/gpu/drm/i915/gt/intel_gt_requests.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/

#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;

list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}

/*
 * Flush completed requests from all active timelines on @gt.
 *
 * @timeout: maximum jiffies to wait for the last request of each busy
 * timeline; pass 0 to only reap already-completed requests. A negative
 * value requests an uninterruptible wait of -@timeout jiffies.
 *
 * Returns the remaining timeout if any timeline is still active, or 0
 * once everything has been retired.
 */
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	unsigned long flags;
	bool interruptible;
	LIST_HEAD(free);

	/* Negative timeout == uninterruptible wait of -timeout jiffies */
	interruptible = true;
	if (unlikely(timeout < 0)) {
		timeout = -timeout;
		interruptible = false;
	}

	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex))
			continue;

		intel_timeline_get(tl);
		GEM_BUG_ON(!tl->active_count);
		tl->active_count++; /* pin the list element */
		spin_unlock_irqrestore(&timelines->lock, flags);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				/*
				 * Fix: honour the caller's requested
				 * interruptibility. Previously this passed
				 * a hard-coded true, so "uninterruptible"
				 * waits (timeout < 0) could still be
				 * aborted by a pending signal, and the
				 * interruptible flag computed above went
				 * unused.
				 */
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		retire_requests(tl);

		spin_lock_irqsave(&timelines->lock, flags);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (--tl->active_count)
			active_count += !!rcu_access_pointer(tl->last_request.fence);
		else
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(tl->active_count);
			list_add(&tl->link, &free);
		}
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}

/*
 * Wait for all outstanding requests on @gt to be retired, retrying the
 * retirement pass until nothing remains active or @timeout expires.
 *
 * Returns the remaining timeout (0 on full idleness), or -EINTR if a
 * signal interrupts the wait.
 */
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	for (;;) {
		timeout = intel_gt_retire_requests_timeout(gt, timeout);
		if (timeout <= 0)
			return timeout;

		/* Be polite before trying again */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}
}

static void retire_work_handler(struct work_struct *work)
{
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);

intel_gt_retire_requests(gt);
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
}

/* One-time setup: prepare the background retirement worker (armed on unpark). */
void intel_gt_init_requests(struct intel_gt *gt)
{
	struct delayed_work *dwork = &gt->requests.retire_work;

	INIT_DELAYED_WORK(dwork, retire_work_handler);
}

/* Stop the periodic retirement worker when the gt powers down. */
void intel_gt_park_requests(struct intel_gt *gt)
{
	struct delayed_work *dwork = &gt->requests.retire_work;

	cancel_delayed_work(dwork);
}

/* (Re)start the periodic retirement worker as the gt powers up. */
void intel_gt_unpark_requests(struct intel_gt *gt)
{
	const unsigned long delay = round_jiffies_up_relative(HZ);

	schedule_delayed_work(&gt->requests.retire_work, delay);
}
24 changes: 24 additions & 0 deletions drivers/gpu/drm/i915/gt/intel_gt_requests.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H

struct intel_gt;

/*
 * Retire completed requests across the gt's timelines, waiting up to
 * @timeout jiffies for busy timelines. Returns the remaining timeout
 * while work is still outstanding, or 0 once all requests are retired.
 */
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);

/* Non-blocking variant: reap only already-completed requests. */
static inline void intel_gt_retire_requests(struct intel_gt *gt)
{
	intel_gt_retire_requests_timeout(gt, 0);
}

/*
 * Retire until the gt is idle or @timeout expires; returns the
 * remaining timeout, 0 on idle, or -EINTR on signal.
 */
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

/* Lifecycle hooks for the background retirement worker. */
void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);   /* gt powering down */
void intel_gt_unpark_requests(struct intel_gt *gt); /* gt powering up */

#endif /* INTEL_GT_REQUESTS_H */
11 changes: 11 additions & 0 deletions drivers/gpu/drm/i915/gt/intel_gt_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,17 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;

struct intel_gt_requests {
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
} requests;

struct intel_wakeref wakeref;
atomic_t user_wakeref;

Expand Down
Loading

0 comments on commit 6610197

Please sign in to comment.