drm/i915/gt: Trace RPS events
Add tracing to the RPS events (interrupts, worker, enabling, threshold
selection, frequency setting), so that if we have to debug reticent HW
we have some traces to start from.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200424162805.25920-1-chris@chris-wilson.co.uk
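Editorial note: GT_TRACE is i915's debug-only trace helper. It compiles away unless the kernel is built with CONFIG_DRM_I915_DEBUG_GEM, in which case the messages land in the ftrace ring buffer via trace_printk(). The following is a simplified sketch of that plumbing, not the exact macros; the real definitions live in drivers/gpu/drm/i915/i915_gem.h (GEM_TRACE) and drivers/gpu/drm/i915/gt/intel_gt.h (GT_TRACE) and differ in detail.

/*
 * Simplified sketch only -- see i915_gem.h and gt/intel_gt.h for the
 * real definitions.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GEM_TRACE(...) do { } while (0)
#endif

/* Prefix each message with the owning device so per-GT logs stay attributable. */
#define GT_TRACE(gt, fmt, ...) \
	GEM_TRACE("%s " fmt, dev_name((gt)->i915->drm.dev), ##__VA_ARGS__)

On a kernel configured this way, the resulting traces can be read back from /sys/kernel/debug/tracing/trace after reproducing the problem.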
Chris Wilson committed Apr 24, 2020
1 parent 1ebf7aa commit 555a322
Showing 1 changed file with 44 additions and 4 deletions.
drivers/gpu/drm/i915/gt/intel_rps.c (48 changes: 44 additions, 4 deletions)
@@ -71,6 +71,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
 
+	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));
+
 	rps_reset_ei(rps);
 
 	spin_lock_irq(&gt->irq_lock);
@@ -128,6 +131,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 	cancel_work_sync(&rps->work);
 
 	rps_reset_interrupts(rps);
+	GT_TRACE(gt, "interrupts:off\n");
 }
 
 static const struct cparams {
@@ -569,6 +573,10 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 	if (IS_VALLEYVIEW(i915))
 		goto skip_hw_write;
 
+	GT_TRACE(rps_to_gt(rps),
+		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+		 new_power, threshold_up, ei_up, threshold_down, ei_down);
+
 	set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
 	set(uncore, GEN6_RP_UP_THRESHOLD,
 	    GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
@@ -633,6 +641,8 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
 
 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
 {
+	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+
 	mutex_lock(&rps->power.mutex);
 	if (interactive) {
 		if (!rps->power.interactive++ && READ_ONCE(rps->active))
@@ -660,6 +670,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
 			  GEN6_AGGRESSIVE_TURBO);
 	set(uncore, GEN6_RPNSWREQ, swreq);
 
+	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+		 val, intel_gpu_freq(rps, val), swreq);
+
 	return 0;
 }
 
@@ -672,6 +685,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
 	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
 	vlv_punit_put(i915);
 
+	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+		 val, intel_gpu_freq(rps, val));
+
 	return err;
 }
 
@@ -705,6 +721,8 @@ void intel_rps_unpark(struct intel_rps *rps)
 	if (!rps->enabled)
 		return;
 
+	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
 	/*
 	 * Use the user's desired frequency as a guide, but for better
 	 * performance, jump directly to RPe as our starting frequency.
@@ -772,6 +790,8 @@ void intel_rps_park(struct intel_rps *rps)
 	 */
 	rps->cur_freq =
 		max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+
+	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
 
 void intel_rps_boost(struct i915_request *rq)
@@ -788,6 +808,9 @@ void intel_rps_boost(struct i915_request *rq)
 	    !dma_fence_is_signaled_locked(&rq->fence)) {
 		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
+		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+			 rq->fence.context, rq->fence.seqno);
+
 		if (!atomic_fetch_inc(&rps->num_waiters) &&
 		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
 			schedule_work(&rps->work);
@@ -883,6 +906,7 @@ static void gen6_rps_init(struct intel_rps *rps)
 static bool rps_reset(struct intel_rps *rps)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
+
 	/* force a reset */
 	rps->power.mode = -1;
 	rps->last_freq = -1;
@@ -1210,11 +1234,17 @@ void intel_rps_enable(struct intel_rps *rps)
 	if (!rps->enabled)
 		return;
 
-	drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
-	drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
+	GT_TRACE(rps_to_gt(rps),
+		 "min:%x, max:%x, freq:[%d, %d]\n",
+		 rps->min_freq, rps->max_freq,
+		 intel_gpu_freq(rps, rps->min_freq),
+		 intel_gpu_freq(rps, rps->max_freq));
 
-	drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
-	drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
+	GEM_BUG_ON(rps->max_freq < rps->min_freq);
+	GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
 }
 
 static void gen6_rps_disable(struct intel_rps *rps)
@@ -1482,6 +1512,12 @@ static void rps_work(struct work_struct *work)
 	max = rps->max_freq_softlimit;
 	if (client_boost)
 		max = rps->max_freq;
+
+	GT_TRACE(gt,
+		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+		 pm_iir, yesno(client_boost),
+		 adj, new_freq, min, max);
+
 	if (client_boost && new_freq < rps->boost_freq) {
 		new_freq = rps->boost_freq;
 		adj = 0;
@@ -1556,6 +1592,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 	if (unlikely(!events))
 		return;
 
+	GT_TRACE(gt, "irq events:%x\n", events);
+
 	gen6_gt_pm_mask_irq(gt, events);
 
 	rps->pm_iir |= events;
@@ -1571,6 +1609,8 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 	if (events) {
 		spin_lock(&gt->irq_lock);
 
+		GT_TRACE(gt, "irq events:%x\n", events);
+
 		gen6_gt_pm_mask_irq(gt, events);
 		rps->pm_iir |= events;
 