Skip to content

Commit

Permalink
drm/xe: Break TLB invalidation out into its own file
Browse files Browse the repository at this point in the history
TLB invalidation is used by more than USM (page faults) so break this
code out into its own file.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
  • Loading branch information
Matthew Brost authored and Rodrigo Vivi committed Dec 19, 2023
1 parent 5b64366 commit a935184
Show file tree
Hide file tree
Showing 9 changed files with 146 additions and 99 deletions.
1 change: 1 addition & 0 deletions drivers/gpu/drm/xe/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ xe-y += xe_bb.o \
xe_gt_mcr.o \
xe_gt_pagefault.o \
xe_gt_sysfs.o \
xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
Expand Down
5 changes: 5 additions & 0 deletions drivers/gpu/drm/xe/xe_gt.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
Expand Down Expand Up @@ -571,6 +572,10 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}

err = xe_gt_tlb_invalidation_init(gt);
if (err)
return err;

err = xe_gt_pagefault_init(gt);
if (err)
return err;
Expand Down
1 change: 1 addition & 0 deletions drivers/gpu/drm/xe/xe_gt_debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include "xe_gt_debugfs.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
Expand Down
99 changes: 3 additions & 96 deletions drivers/gpu/drm/xe/xe_gt_pagefault.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,10 @@

#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_pagefault.h"
#include "xe_migrate.h"
#include "xe_pt.h"
#include "xe_trace.h"
Expand Down Expand Up @@ -61,40 +62,6 @@ guc_to_gt(struct xe_guc *guc)
return container_of(guc, struct xe_gt, uc.guc);
}

/*
 * Allocate the next invalidation seqno and send a full-GT TLB invalidation
 * request to the GuC. Returns the assigned seqno (always > 0) on success or
 * a negative error code from the CT send on failure.
 */
static int send_tlb_invalidation(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
0, /* seqno, assigned below while holding ct.lock */
XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
XE_GUC_TLB_INVAL_FLUSH_CACHE,
};
int seqno;
int ret;

/*
 * XXX: The seqno algorithm relies on TLB invalidation being processed
 * in order which they currently are, if that changes the algorithm will
 * need to be updated.
 */
mutex_lock(&guc->ct.lock);
seqno = gt->usm.tlb_invalidation_seqno;
action[1] = seqno;
/* Wrap within [1, TLB_INVALIDATION_SEQNO_MAX); 0 is never handed out */
gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
TLB_INVALIDATION_SEQNO_MAX;
if (!gt->usm.tlb_invalidation_seqno)
gt->usm.tlb_invalidation_seqno = 1;
ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
G2H_LEN_DW_TLB_INVALIDATE, 1);
if (!ret)
ret = seqno;
mutex_unlock(&guc->ct.lock);

return ret;
}

static bool access_is_atomic(enum access_type access_type)
{
return access_type == ACCESS_TYPE_ATOMIC;
Expand Down Expand Up @@ -278,7 +245,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
* defer TLB invalidate + fault response to a callback of fence
* too
*/
ret = send_tlb_invalidation(&gt->uc.guc);
ret = xe_gt_tlb_invalidation(gt);
if (ret >= 0)
ret = 0;
}
Expand Down Expand Up @@ -433,7 +400,6 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
if (!xe->info.supports_usm)
return 0;

gt->usm.tlb_invalidation_seqno = 1;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
gt->usm.pf_queue[i].gt = gt;
spin_lock_init(&gt->usm.pf_queue[i].lock);
Expand Down Expand Up @@ -482,65 +448,6 @@ void xe_gt_pagefault_reset(struct xe_gt *gt)
}
}

/*
 * Issue a full GT TLB invalidation via the GuC. Returns the seqno (> 0) to
 * wait on with xe_gt_tlb_invalidation_wait(), or a negative error code.
 */
int xe_gt_tlb_invalidation(struct xe_gt *gt)
{
return send_tlb_invalidation(&gt->uc.guc);
}

/*
 * True once @seqno has been received. The second test handles seqno wrap:
 * a distance of more than half the seqno space means @seqno is in the past
 * relative to the last received value.
 */
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
return true;

if (seqno - gt->usm.tlb_invalidation_seqno_recv >
(TLB_INVALIDATION_SEQNO_MAX / 2))
return true;

return false;
}

/*
 * Sleep until the G2H done handler reports @seqno (or a later one) as
 * received, or until the HZ / 5 (200 ms) timeout expires. Returns 0 on
 * success, -ETIME on timeout.
 */
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_guc *guc = &gt->uc.guc;
int ret;

/*
 * XXX: See above, this algorithm only works if seqno are always in
 * order
 */
ret = wait_event_timeout(guc->ct.wq,
tlb_invalidation_seqno_past(gt, seqno),
HZ / 5);
if (!ret) {
drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
seqno, gt->usm.tlb_invalidation_seqno_recv);
return -ETIME;
}

return 0;
}

/*
 * G2H handler for TLB invalidation done: records the completed seqno
 * (msg[0]) and wakes any waiters in xe_gt_tlb_invalidation_wait().
 * Returns 0 on success, -EPROTO on a malformed (len != 1) message.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
int expected_seqno;

if (unlikely(len != 1))
return -EPROTO;

/* Sanity check on seqno */
expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
TLB_INVALIDATION_SEQNO_MAX;
XE_WARN_ON(expected_seqno != msg[0]);

gt->usm.tlb_invalidation_seqno_recv = msg[0];
/* Publish the store before waking waiters */
smp_wmb();
wake_up_all(&guc->ct.wq);

return 0;
}

static int granularity_in_byte(int val)
{
switch (val) {
Expand Down
3 changes: 0 additions & 3 deletions drivers/gpu/drm/xe/xe_gt_pagefault.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,7 @@ struct xe_guc;

int xe_gt_pagefault_init(struct xe_gt *gt);
void xe_gt_pagefault_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation(struct xe_gt *gt);
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);

#endif /* _XE_GT_PAGEFAULT_ */
115 changes: 115 additions & 0 deletions drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/

#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"

/* Map a GuC instance back to the GT that embeds it. */
static struct xe_gt *guc_to_gt(struct xe_guc *guc)
{
	return container_of(guc, struct xe_gt, uc.guc);
}

/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Seqnos start at 1; send_tlb_invalidation() skips 0 when wrapping, so 0 is
 * never a valid seqno.
 *
 * Return: 0 (cannot fail).
 */
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
gt->usm.tlb_invalidation_seqno = 1;

return 0;
}

/*
 * Allocate the next invalidation seqno and send a full-GT TLB invalidation
 * request to the GuC.
 *
 * NOTE(review): the trailing xe_guc_ct_send_locked() args appear to reserve
 * G2H space for the done reply — confirm against the CT layer.
 *
 * Return: the seqno assigned to this invalidation (always > 0) on success,
 * negative error code from the CT send on failure.
 */
static int send_tlb_invalidation(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
0, /* seqno, assigned below while holding ct.lock */
XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
XE_GUC_TLB_INVAL_FLUSH_CACHE,
};
int seqno;
int ret;

/*
 * XXX: The seqno algorithm relies on TLB invalidation being processed
 * in order which they currently are, if that changes the algorithm will
 * need to be updated.
 */
mutex_lock(&guc->ct.lock);
seqno = gt->usm.tlb_invalidation_seqno;
action[1] = seqno;
/* Wrap within [1, TLB_INVALIDATION_SEQNO_MAX); 0 is never handed out */
gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
TLB_INVALIDATION_SEQNO_MAX;
if (!gt->usm.tlb_invalidation_seqno)
gt->usm.tlb_invalidation_seqno = 1;
ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
G2H_LEN_DW_TLB_INVALIDATE, 1);
if (!ret)
ret = seqno;
mutex_unlock(&guc->ct.lock);

return ret;
}

/**
 * xe_gt_tlb_invalidation - Issue a full GT TLB invalidation
 * @gt: graphics tile
 *
 * Return: seqno (> 0) to pass to xe_gt_tlb_invalidation_wait(), or a
 * negative error code on failure.
 */
int xe_gt_tlb_invalidation(struct xe_gt *gt)
{
return send_tlb_invalidation(&gt->uc.guc);
}

/*
 * Has @seqno already been received? The distance check handles seqno wrap:
 * being more than half the seqno space ahead of the last received value
 * means @seqno is actually in the past.
 */
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
		return true;

	return seqno - gt->usm.tlb_invalidation_seqno_recv >
		(TLB_INVALIDATION_SEQNO_MAX / 2);
}

/**
 * xe_gt_tlb_invalidation_wait - Wait for a TLB invalidation to complete
 * @gt: graphics tile
 * @seqno: seqno returned by xe_gt_tlb_invalidation()
 *
 * Sleeps until the G2H done handler reports @seqno (or a later one) as
 * received, or until the HZ / 5 (200 ms) timeout expires.
 *
 * Return: 0 on success, -ETIME on timeout.
 */
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_guc *guc = &gt->uc.guc;
int ret;

/*
 * XXX: See above, this algorithm only works if seqno are always in
 * order
 */
ret = wait_event_timeout(guc->ct.wq,
tlb_invalidation_seqno_past(gt, seqno),
HZ / 5);
if (!ret) {
/* Fixed grammar of the error string ("time'd out" -> "timed out") */
drm_err(&xe->drm, "TLB invalidation timed out, seqno=%d, recv=%d\n",
seqno, gt->usm.tlb_invalidation_seqno_recv);
return -ETIME;
}

return 0;
}

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done G2H handler
 * @guc: GuC that received the message
 * @msg: G2H payload; msg[0] is the completed invalidation seqno
 * @len: payload length in dwords, must be 1
 *
 * Records the completed seqno and wakes any waiters sleeping in
 * xe_gt_tlb_invalidation_wait().
 *
 * Return: 0 on success, -EPROTO on a malformed message.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
int expected_seqno;

if (unlikely(len != 1))
return -EPROTO;

/* Sanity check on seqno */
expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
TLB_INVALIDATION_SEQNO_MAX;
XE_WARN_ON(expected_seqno != msg[0]);

gt->usm.tlb_invalidation_seqno_recv = msg[0];
/*
 * Publish the store before waking waiters.
 * NOTE(review): no paired read barrier is visible on the waiter side in
 * tlb_invalidation_seqno_past() — confirm wait_event ordering suffices.
 */
smp_wmb();
wake_up_all(&guc->ct.wq);

return 0;
}
19 changes: 19 additions & 0 deletions drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GT_TLB_INVALIDATION_H_
#define _XE_GT_TLB_INVALIDATION_H_

#include <linux/types.h>

struct xe_gt;
struct xe_guc;

int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
int xe_gt_tlb_invalidation(struct xe_gt *gt);
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);

/* Fixed: #endif comment now matches the guard macro (_H_ suffix was missing) */
#endif /* _XE_GT_TLB_INVALIDATION_H_ */
1 change: 1 addition & 0 deletions drivers/gpu/drm/xe/xe_guc_ct.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_trace.h"
Expand Down
1 change: 1 addition & 0 deletions drivers/gpu/drm/xe/xe_vm.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "xe_engine.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
Expand Down

0 comments on commit a935184

Please sign in to comment.