drm/amdgpu: rename direct to immediate for VM updates
To avoid confusion with direct ring submissions, rename bottom-of-pipe
VM table changes to immediate updates.
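
As a rough sketch of what the rename means in practice (illustrative
only, condensed from the amdgpu_vm_sdma_commit() hunk below): each VM
keeps two scheduler entities for page table updates, and the renamed
flag picks between them.

    /* Condensed illustration -- see the real change in
     * drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c below.
     * "immediate" serves bottom-of-pipe updates such as page fault
     * handling; "delayed" serves normal deferred updates. "direct"
     * now refers only to direct ring submission.
     */
    struct drm_sched_entity *entity;

    entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;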

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König authored and Alex Deucher committed Apr 28, 2020
1 parent 9ecefb1 commit eaad0c3
Showing 5 changed files with 51 additions and 50 deletions.
6 changes: 3 additions & 3 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
             !dma_fence_is_later(updates, (*id)->flushed_updates))
                 updates = NULL;
 
-        if ((*id)->owner != vm->direct.fence_context ||
+        if ((*id)->owner != vm->immediate.fence_context ||
             job->vm_pd_addr != (*id)->pd_gpu_addr ||
             updates || !(*id)->last_flush ||
             ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                 struct dma_fence *flushed;
 
                 /* Check all the prerequisites to using this VMID */
-                if ((*id)->owner != vm->direct.fence_context)
+                if ((*id)->owner != vm->immediate.fence_context)
                         continue;
 
                 if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         }
 
         id->pd_gpu_addr = job->vm_pd_addr;
-        id->owner = vm->direct.fence_context;
+        id->owner = vm->immediate.fence_context;
 
         if (job->vm_needs_flush) {
                 dma_fence_put(id->last_flush);
60 changes: 30 additions & 30 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
  * @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
  *
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_bo *bo,
-                              bool direct)
+                              bool immediate)
 {
         struct ttm_operation_ctx ctx = { true, false };
         unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         memset(&params, 0, sizeof(params));
         params.adev = adev;
         params.vm = vm;
-        params.direct = direct;
+        params.immediate = immediate;
 
         r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
         if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @vm: requesting vm
  * @level: the page table level
- * @direct: use a direct update
+ * @immediate: use an immediate update
  * @bp: resulting BO allocation parameters
  */
 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                               int level, bool direct,
+                               int level, bool immediate,
                                struct amdgpu_bo_param *bp)
 {
         memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         else if (!vm->root.base.bo || vm->root.base.bo->shadow)
                 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
         bp->type = ttm_bo_type_kernel;
-        bp->no_wait_gpu = direct;
+        bp->no_wait_gpu = immediate;
         if (vm->root.base.bo)
                 bp->resv = vm->root.base.bo->tbo.base.resv;
 }
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @adev: amdgpu_device pointer
  * @vm: VM to allocate page tables for
  * @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Make sure a specific page table or directory is allocated.
  *
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct amdgpu_vm_pt_cursor *cursor,
-                               bool direct)
+                               bool immediate)
 {
         struct amdgpu_vm_pt *entry = cursor->entry;
         struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
         if (entry->base.bo)
                 return 0;
 
-        amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+        amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
 
         r = amdgpu_bo_create(adev, &bp, &pt);
         if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
         pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
         amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 
-        r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+        r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
         if (r)
                 goto error_free_pt;
 
@@ -1276,15 +1276,15 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
  *
  * Makes sure all directories are up to date.
  *
  * Returns:
  * 0 for success, error for failure.
  */
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                          struct amdgpu_vm *vm, bool direct)
+                          struct amdgpu_vm *vm, bool immediate)
 {
         struct amdgpu_vm_update_params params;
         int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
         memset(&params, 0, sizeof(params));
         params.adev = adev;
         params.vm = vm;
-        params.direct = direct;
+        params.immediate = immediate;
 
         r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
         if (r)
@@ -1451,7 +1451,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                  * address range are actually allocated
                  */
                 r = amdgpu_vm_alloc_pts(params->adev, params->vm,
-                                        &cursor, params->direct);
+                                        &cursor, params->immediate);
                 if (r)
                         return r;
         }
@@ -1557,7 +1557,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
  * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
@@ -1572,7 +1572,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-                                       struct amdgpu_vm *vm, bool direct,
+                                       struct amdgpu_vm *vm, bool immediate,
                                        struct dma_resv *resv,
                                        uint64_t start, uint64_t last,
                                        uint64_t flags, uint64_t addr,
@@ -1586,7 +1586,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
         memset(&params, 0, sizeof(params));
         params.adev = adev;
         params.vm = vm;
-        params.direct = direct;
+        params.immediate = immediate;
         params.pages_addr = pages_addr;
 
         /* Implicitly sync to command submissions in the same VM before
@@ -1606,8 +1606,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
         if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
                 struct amdgpu_bo *root = vm->root.base.bo;
 
-                if (!dma_fence_is_signaled(vm->last_direct))
-                        amdgpu_bo_fence(root, vm->last_direct, true);
+                if (!dma_fence_is_signaled(vm->last_immediate))
+                        amdgpu_bo_fence(root, vm->last_immediate, true);
         }
 
         r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -2582,7 +2582,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                 return false;
 
         /* Don't evict VM page tables while they are updated */
-        if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+        if (!dma_fence_is_signaled(bo_base->vm->last_immediate)) {
                 amdgpu_vm_eviction_unlock(bo_base->vm);
                 return false;
         }
@@ -2759,7 +2759,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
         if (timeout <= 0)
                 return timeout;
 
-        return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+        return dma_fence_wait_timeout(vm->last_immediate, true, timeout);
 }
 
 /**
@@ -2795,7 +2795,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 
         /* create scheduler entities for page table updates */
-        r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+        r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                                   adev->vm_manager.vm_pte_scheds,
                                   adev->vm_manager.vm_pte_num_scheds, NULL);
         if (r)
@@ -2805,7 +2805,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                   adev->vm_manager.vm_pte_scheds,
                                   adev->vm_manager.vm_pte_num_scheds, NULL);
         if (r)
-                goto error_free_direct;
+                goto error_free_immediate;
 
         vm->pte_support_ats = false;
         vm->is_compute_context = false;
@@ -2831,7 +2831,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         else
                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
         vm->last_update = NULL;
-        vm->last_direct = dma_fence_get_stub();
+        vm->last_immediate = dma_fence_get_stub();
 
         mutex_init(&vm->eviction_lock);
         vm->evicting = false;
@@ -2885,11 +2885,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         vm->root.base.bo = NULL;
 
 error_free_delayed:
-        dma_fence_put(vm->last_direct);
+        dma_fence_put(vm->last_immediate);
         drm_sched_entity_destroy(&vm->delayed);
 
-error_free_direct:
-        drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+        drm_sched_entity_destroy(&vm->immediate);
 
         return r;
 }
@@ -3086,8 +3086,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 vm->pasid = 0;
         }
 
-        dma_fence_wait(vm->last_direct, false);
-        dma_fence_put(vm->last_direct);
+        dma_fence_wait(vm->last_immediate, false);
+        dma_fence_put(vm->last_immediate);
 
         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3104,7 +3104,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         amdgpu_bo_unref(&root);
         WARN_ON(vm->root.base.bo);
 
-        drm_sched_entity_destroy(&vm->direct);
+        drm_sched_entity_destroy(&vm->immediate);
         drm_sched_entity_destroy(&vm->delayed);
 
         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
10 changes: 5 additions & 5 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -206,9 +206,9 @@ struct amdgpu_vm_update_params {
         struct amdgpu_vm *vm;
 
         /**
-         * @direct: if changes should be made directly
+         * @immediate: if changes should be made immediately
          */
-        bool direct;
+        bool immediate;
 
         /**
          * @pages_addr:
@@ -274,11 +274,11 @@ struct amdgpu_vm {
         struct dma_fence *last_update;
 
         /* Scheduler entities for page table updates */
-        struct drm_sched_entity direct;
+        struct drm_sched_entity immediate;
         struct drm_sched_entity delayed;
 
         /* Last submission to the scheduler entities */
-        struct dma_fence *last_direct;
+        struct dma_fence *last_immediate;
 
         unsigned int pasid;
         /* dedicated to vm */
@@ -379,7 +379,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-                          struct amdgpu_vm *vm, bool direct);
+                          struct amdgpu_vm *vm, bool immediate);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm,
                           struct dma_fence **fence);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
         pe += (unsigned long)amdgpu_bo_kptr(bo);
 
-        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
         for (i = 0; i < count; i++) {
                 value = p->pages_addr ?
23 changes: 12 additions & 11 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,8 +61,8 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                   struct dma_resv *resv,
                                   enum amdgpu_sync_mode sync_mode)
 {
-        enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
-                AMDGPU_IB_POOL_DELAYED;
+        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+                : AMDGPU_IB_POOL_DELAYED;
         unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
         int r;
 
@@ -96,7 +96,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
         struct amdgpu_ring *ring;
         int r;
 
-        entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+        entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
         ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
 
         WARN_ON(ib->length_dw == 0);
@@ -106,15 +106,16 @@
         if (r)
                 goto error;
 
-        if (p->direct) {
+        if (p->immediate) {
                 tmp = dma_fence_get(f);
-                swap(p->vm->last_direct, tmp);
+                swap(p->vm->last_immediate, tmp);
                 dma_fence_put(tmp);
         } else {
-                dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+                dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv,
+                                          f);
         }
 
-        if (fence && !p->direct)
+        if (fence && !p->immediate)
                 swap(*fence, f);
         dma_fence_put(f);
         return 0;
@@ -144,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
         src += p->num_dw_left * 4;
 
         pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-        trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+        trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
 
         amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
 }
@@ -171,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
         struct amdgpu_ib *ib = p->job->ibs;
 
         pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
         if (count < 3) {
                 amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
                                     count, incr);
@@ -200,8 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                uint64_t addr, unsigned count, uint32_t incr,
                                uint64_t flags)
 {
-        enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
-                AMDGPU_IB_POOL_DELAYED;
+        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+                : AMDGPU_IB_POOL_DELAYED;
         unsigned int i, ndw, nptes;
         uint64_t *pte;
         int r;
