Merge tag 'drm-intel-next-fixes-2022-08-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- disable pci resize on 32-bit systems (Nirmoy)
- don't leak the ccs state (Matt)
- TLB invalidation fixes (Chris)
[now with all fixes of fixes]

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YvVumNCga+90fYN0@intel.com
Dave Airlie committed Aug 17, 2022
2 parents 568035b + 9d50bff commit 8ae4be5
Showing 15 changed files with 183 additions and 55 deletions.
16 changes: 4 additions & 12 deletions drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
assert_object_held(obj);
assert_object_held_shared(obj);

if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}

if (!i915_gem_object_trylock(obj, NULL)) {
/* busy, toss it back to the pile */
if (llist_add(&obj->freed, &i915->mm.free_list))
queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
continue;
}

__i915_gem_object_pages_fini(obj);
i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);

/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work.work);
container_of(work, struct drm_i915_private, mm.free_work);

i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/

if (llist_add(&obj->freed, &i915->mm.free_list))
queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)

void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
3 changes: 2 additions & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;

u32 tlb;
} mm;

struct {
25 changes: 16 additions & 9 deletions drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = to_gt(i915);

if (!obj->mm.tlb)
return;

intel_gt_invalidate_tlb(gt, obj->mm.tlb);
obj->mm.tlb = 0;
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;

with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
intel_gt_invalidate_tlbs(to_gt(i915));
}
flush_tlb_invalidate(obj);

return pages;
}
77 changes: 60 additions & 17 deletions drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);

mutex_init(&gt->tlb_invalidate_lock);

INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);

@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
mutex_init(&gt->tlb.invalidate_lock);
seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);

intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}

void intel_gt_invalidate_tlbs(struct intel_gt *gt)
static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;

if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
return;

if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,41 +946,50 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;

GEM_TRACE("\n");

assert_rpm_wakelock_held(&i915->runtime_pm);

mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */

awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;

if (!intel_engine_pm_is_awake(engine))
continue;

rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;

intel_uncore_write_fw(uncore, rb.reg, rb.bit);
awake |= engine->mask;
}

GT_TRACE(gt, "invalidated engines %08x\n", awake);

/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
if (awake &&
(IS_TIGERLAKE(i915) ||
IS_DG1(i915) ||
IS_ROCKETLAKE(i915) ||
IS_ALDERLAKE_S(i915) ||
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);

spin_unlock_irq(&uncore->lock);

for_each_engine(engine, gt, id) {
for_each_engine_masked(engine, gt, awake, tmp) {
struct reg_and_bit rb;

/*
* HW architecture suggests a typical invalidation time of 40us,
* with pessimistic cases up to 100us and a recommendation to
* cap at 1ms. We go a bit higher just in case.
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
struct reg_and_bit rb;

rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;

if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
mutex_unlock(&gt->tlb_invalidate_lock);
}

static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
u32 cur = intel_gt_tlb_seqno(gt);

/* Only skip if a *full* TLB invalidate barrier has passed */
return (s32)(cur - ALIGN(seqno, 2)) > 0;
}

void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
{
intel_wakeref_t wakeref;

if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
return;

if (intel_gt_is_wedged(gt))
return;

if (tlb_seqno_passed(gt, seqno))
return;

with_intel_gt_pm_if_awake(gt, wakeref) {
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;

mmio_invalidate_full(gt);

write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
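
The hunks above add only the consumer half of the batching scheme: flush_tlb_invalidate() in i915_gem_pages.c hands the stashed obj->mm.tlb seqno to intel_gt_invalidate_tlb(), which skips the MMIO dance if a full barrier has already passed. The producer half, which is expected to record the seqno once the PTEs have been cleared, sits in one of the 15 changed files not expanded on this page. A minimal sketch of the intended pairing, using an illustrative helper name rather than the exact upstream function:

/*
 * Sketch only (not one of the hunks shown above): after the PTEs have
 * been cleared, record which full TLB barrier would make the stale
 * entries harmless, instead of invalidating immediately.
 */
static void note_pending_tlb_invalidate(struct intel_gt *gt, u32 *tlb)
{
	/* An odd seqno means "needs a full barrier newer than now" */
	WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(gt));
}

When the backing pages are finally released, __i915_gem_object_unset_pages() calls flush_tlb_invalidate(), which forwards obj->mm.tlb to intel_gt_invalidate_tlb() and resets it to 0, so at most one full invalidation is paid for any number of unbinds in between.
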
12 changes: 11 additions & 1 deletion drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,

void intel_gt_watchdog_work(struct work_struct *work);

void intel_gt_invalidate_tlbs(struct intel_gt *gt);
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
return seqprop_sequence(&gt->tlb.seqno);
}

static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
{
return intel_gt_tlb_seqno(gt) | 1;
}

void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);

#endif /* __INTEL_GT_H__ */
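
Why the producer forces the seqno odd (| 1) while tlb_seqno_passed() rounds it back up to even with ALIGN(seqno, 2) is easiest to see with concrete numbers. Below is a small self-contained sketch of that arithmetic, assuming (as in the kernel's seqlock implementation) that each completed full barrier advances the gt->tlb.seqno count, added in intel_gt_types.h further down, by two via write_seqcount_invalidate(); the helper and values here are illustrative, not kernel code:

#include <assert.h>
#include <stdint.h>

/* Open-coded counterparts of ALIGN(x, 2) and tlb_seqno_passed() above. */
#define ALIGN2(x) (((x) + 1u) & ~1u)

static int seqno_passed(uint32_t cur, uint32_t stamp)
{
	/* Same wraparound-safe (s32) comparison as the kernel helper */
	return (int32_t)(cur - ALIGN2(stamp)) > 0;
}

int main(void)
{
	uint32_t cur = 0;         /* gt->tlb.seqno starts out even */
	uint32_t stamp = cur | 1; /* intel_gt_next_invalidate_tlb_full() */

	/* Nothing has been invalidated since the PTEs were cleared */
	assert(!seqno_passed(cur, stamp));

	/*
	 * One barrier completes (0 -> 2), but it may have started before
	 * our PTEs were cleared, so it cannot be trusted to have flushed
	 * the stale entries.
	 */
	cur += 2;
	assert(!seqno_passed(cur, stamp));

	/* This barrier provably began after the stamp was taken */
	cur += 2;
	assert(seqno_passed(cur, stamp));

	return 0;
}
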
3 changes: 3 additions & 0 deletions drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)

#define with_intel_gt_pm_if_awake(gt, wf) \
for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)

static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
18 changes: 17 additions & 1 deletion drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;

struct mutex tlb_invalidate_lock;
struct {
/* Serialize global tlb invalidations */
struct mutex invalidate_lock;

/*
* Batch TLB invalidations
*
* After unbinding the PTE, we need to ensure the TLBs
* are invalidated prior to releasing the physical pages.
* But we only need one such invalidation for all unbinds,
* so we track how many TLB invalidations have been
* performed since unbinding the PTE and only emit an extra
* invalidate if no full barrier has been passed.
*/
seqcount_mutex_t seqno;
} tlb;

struct i915_wa_list wa_list;

23 changes: 22 additions & 1 deletion drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -708,7 +708,7 @@ intel_context_migrate_copy(struct intel_context *ce,
u8 src_access, dst_access;
struct i915_request *rq;
int src_sz, dst_sz;
bool ccs_is_src;
bool ccs_is_src, overwrite_ccs;
int err;

GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +749,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}

overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;

src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -852,6 +854,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
} else if (overwrite_ccs) {
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;

/*
* While we can't always restore/manage the CCS state,
* we still need to ensure we don't leak the CCS state
* from the previous user, so make sure we overwrite it
* with something.
*/
err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
dst_offset, DIRECT_ACCESS, len);
if (err)
goto out_rq;

err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
}

/* Arbitration is re-enabled between requests. */
