Skip to content

Commit

Permalink
drm/amdgpu: use scheduler dependencies for VM updates
Browse files Browse the repository at this point in the history
Instead of putting the dependencies into the job's sync object, track them directly as scheduler dependencies.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-9-christian.koenig@amd.com
  • Loading branch information
Christian König committed Nov 3, 2022
1 parent 1b2d5ed commit aab9cf7
Show file tree
Hide file tree
Showing 3 changed files with 52 additions and 16 deletions.
56 changes: 42 additions & 14 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}

/**
 * amdgpu_sync_entry_free - free a sync entry back to the slab
 * @e: entry to free
 *
 * Removes @e from its sync object's hash table, drops the entry's
 * fence reference and returns the entry memory to amdgpu_sync_slab.
 * Callers must hold whatever protects the hash table iteration.
 */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
	hash_del(&e->node);
	dma_fence_put(e->fence);
	kmem_cache_free(amdgpu_sync_slab, e);
}

/**
* amdgpu_sync_peek_fence - get the next fence not signaled yet
*
Expand All @@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
amdgpu_sync_entry_free(e);
continue;
}
if (ring && s_fence) {
Expand Down Expand Up @@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
if (r)
return r;
} else {
hash_del(&e->node);
dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
amdgpu_sync_entry_free(e);
}
}

return 0;
}

/**
 * amdgpu_sync_push_to_job - push fences into job
 * @sync: sync object to get the fences from
 * @job: job to push the fences into
 *
 * Add all unsignaled fences from sync to job.
 */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
	struct amdgpu_sync_entry *entry;
	struct hlist_node *tmp;
	struct dma_fence *fence;
	int bucket, r;

	hash_for_each_safe(sync->fences, bucket, tmp, entry, node) {
		fence = entry->fence;

		/* Already signaled fences are no longer dependencies,
		 * just drop the entry.
		 */
		if (dma_fence_is_signaled(fence)) {
			amdgpu_sync_entry_free(entry);
			continue;
		}

		/* Take an extra reference for the job; the entry keeps
		 * its own reference until the sync object is freed.
		 */
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&job->base, fence);
		if (r)
			return r;
	}
	return 0;
}

int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
struct amdgpu_sync_entry *e;
Expand All @@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
if (r)
return r;

hash_del(&e->node);
dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
amdgpu_sync_entry_free(e);
}

return 0;
Expand All @@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
struct hlist_node *tmp;
unsigned int i;

hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
hash_for_each_safe(sync->fences, i, tmp, e, node)
amdgpu_sync_entry_free(e);
}

/**
Expand Down
2 changes: 2 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ struct dma_fence;
struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_job;

enum amdgpu_sync_mode {
AMDGPU_SYNC_ALWAYS,
Expand All @@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
Expand Down
10 changes: 8 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
struct amdgpu_sync sync;
int r;

r = amdgpu_vm_sdma_alloc_job(p, 0);
Expand All @@ -96,7 +97,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (!resv)
return 0;

return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
amdgpu_sync_create(&sync);
r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
if (!r)
r = amdgpu_sync_push_to_job(&sync, p->job);
amdgpu_sync_free(&sync);
return r;
}

/**
Expand Down Expand Up @@ -225,7 +231,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
/* Wait for PD/PT moves to be completed */
dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
r = amdgpu_sync_fence(&p->job->sync, fence);
r = drm_sched_job_add_dependency(&p->job->base, fence);
if (r) {
dma_resv_iter_end(&cursor);
return r;
Expand Down

0 comments on commit aab9cf7

Please sign in to comment.