dma-buf: specify usage while adding fences to dma_resv obj v7
Instead of distinguishing between shared and exclusive fences, specify
the fence usage while adding fences.

Rework all drivers to use this interface instead and deprecate the old one.

v2: some kerneldoc comments suggested by Daniel
v3: fix a missing case in radeon
v4: rebase on nouveau changes, fix lockdep and temporarily disable warning
v5: more documentation updates
v6: separate internal dma_resv changes from this patch, which avoids
    temporarily disabling the warning; rebase on upstream changes
v7: fix missed case in lima driver, minimize changes to i915_gem_busy_ioctl

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com
Christian König committed Apr 7, 2022
1 parent 7bc80a5 commit 73511ed
Showing 30 changed files with 149 additions and 166 deletions.
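
At its core, the series replaces the two old entry points with a single one that takes the usage as a parameter. A minimal before/after sketch of a driver call site (not from this patch; assumes the reservation object is already locked and a slot reserved):

/* Before: callers picked the slot type explicitly. */
dma_resv_add_excl_fence(obj->resv, fence);	/* exclusive (writer) */
dma_resv_add_shared_fence(obj->resv, fence);	/* shared (reader) */

/* After: callers state how the fence is used instead. */
dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_WRITE);
dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ);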
48 changes: 37 additions & 11 deletions drivers/dma-buf/dma-resv.c
@@ -234,14 +234,14 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);
 
 #ifdef CONFIG_DEBUG_MUTEXES
 /**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset shared fences for debugging
  * @obj: the dma_resv object to reset
  *
  * Reset the number of pre-reserved shared slots to test that drivers do
  * correct slot allocation using dma_resv_reserve_fences(). See also
  * &dma_resv_list.shared_max.
  */
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
 {
 	struct dma_resv_list *fences = dma_resv_shared_list(obj);
 
@@ -251,7 +251,7 @@ void dma_resv_reset_shared_max(struct dma_resv *obj)
 	if (fences)
 		fences->shared_max = fences->shared_count;
 }
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
 #endif
 
 /**
@@ -264,7 +264,8 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
  *
  * See also &dma_resv.fence for a discussion of the semantics.
  */
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_shared_fence(struct dma_resv *obj,
+				      struct dma_fence *fence)
 {
 	struct dma_resv_list *fobj;
 	struct dma_fence *old;
@@ -305,13 +306,13 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 	write_seqcount_end(&obj->seq);
 	dma_fence_put(old);
 }
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
 /**
  * dma_resv_replace_fences - replace fences in the dma_resv obj
  * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence has no longer access to the
@@ -321,12 +322,16 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
 * update fence which makes the resource inaccessible.
 */
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
-			     struct dma_fence *replacement)
+			     struct dma_fence *replacement,
+			     enum dma_resv_usage usage)
 {
 	struct dma_resv_list *list;
 	struct dma_fence *old;
 	unsigned int i;
 
+	/* Only readers supported for now */
+	WARN_ON(usage != DMA_RESV_USAGE_READ);
+
 	dma_resv_assert_held(obj);
 
 	write_seqcount_begin(&obj->seq);
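
As a usage illustration (not part of this patch): with the WARN_ON above, DMA_RESV_USAGE_READ is the only usage this version accepts, and the amdgpu_amdkfd hunk further down follows exactly this shape to neuter an eviction fence. A hedged sketch, error handling elided:

/* Sketch: replace every fence from one context with a signaled stub.
 * The stub fence and the READ usage mirror the amdkfd caller below. */
struct dma_fence *stub = dma_fence_get_stub();

dma_resv_lock(resv, NULL);
dma_resv_replace_fences(resv, context, stub, DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
dma_fence_put(stub);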
@@ -360,7 +365,8 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * See also &dma_resv.fence_excl for a discussion of the semantics.
 */
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_excl_fence(struct dma_resv *obj,
+				    struct dma_fence *fence)
 {
 	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
 
@@ -375,7 +381,27 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_fence_put(old_fence);
 }
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
+
+/**
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
+ * @obj: the reservation object
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
+ *
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
+ * dma_resv_reserve_fences() has been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
+ */
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+			enum dma_resv_usage usage)
+{
+	if (usage == DMA_RESV_USAGE_WRITE)
+		dma_resv_add_excl_fence(obj, fence);
+	else
+		dma_resv_add_shared_fence(obj, fence);
+}
+EXPORT_SYMBOL(dma_resv_add_fence);
 
 /* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
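
For context, a short sketch of the calling sequence the kerneldoc above implies: lock, reserve a slot, then add. Error handling is elided and job_fence is a placeholder name:

/* Sketch of a driver publishing a write fence; assumes obj is a
 * struct dma_resv * and job_fence a struct dma_fence *. */
dma_resv_lock(obj, NULL);
if (!dma_resv_reserve_fences(obj, 1))
	dma_resv_add_fence(obj, job_fence, DMA_RESV_USAGE_WRITE);
dma_resv_unlock(obj);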
@@ -574,7 +600,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 		}
 
 		dma_fence_get(f);
-		if (dma_resv_iter_is_exclusive(&cursor))
+		if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
 			excl = f;
 		else
 			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
@@ -771,13 +797,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
 */
 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
 {
+	static const char *usage[] = { "write", "read" };
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
 	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
 		seq_printf(seq, "\t%s fence:",
-			   dma_resv_iter_is_exclusive(&cursor) ?
-				"Exclusive" : "Shared");
+			   usage[dma_resv_iter_usage(&cursor)]);
 		dma_fence_describe(fence, seq);
 	}
 }
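
The usage[] table above indexes directly by the enum value, which only works because of the enum's ordering. At this point in the series the enum presumably looks like the sketch below (it was introduced by the preceding patch; WRITE sorts before READ):

/* Assumed layout, matching usage[] above and the ordered
 * comparisons (<=, >=, ==) used throughout this patch. */
enum dma_resv_usage {
	DMA_RESV_USAGE_WRITE,
	DMA_RESV_USAGE_READ,
};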
101 changes: 27 additions & 74 deletions drivers/dma-buf/st-dma-resv.c
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
 	return r;
 }
 
-static int test_signaling(void *arg, enum dma_resv_usage usage)
+static int test_signaling(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv resv;
 	struct dma_fence *f;
 	int r;
@@ -81,11 +82,7 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
 		goto err_unlock;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
-
+	dma_resv_add_fence(&resv, f, usage);
 	if (dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv unexpectedly signaled\n");
 		r = -EINVAL;
@@ -105,18 +102,9 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_signaling(void *arg)
-{
-	return test_signaling(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_signaling(void *arg)
-{
-	return test_signaling(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each(void *arg, enum dma_resv_usage usage)
+static int test_for_each(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
 	struct dma_resv resv;
@@ -139,10 +127,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 		goto err_unlock;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 
 	r = -ENOENT;
 	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
@@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 		r = -EINVAL;
 		goto err_unlock;
 	}
-	if (dma_resv_iter_is_exclusive(&cursor) !=
-	    (usage >= DMA_RESV_USAGE_READ)) {
+	if (dma_resv_iter_usage(&cursor) != usage) {
 		pr_err("Unexpected fence usage\n");
 		r = -EINVAL;
 		goto err_unlock;
@@ -177,18 +161,9 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_for_each(void *arg)
-{
-	return test_for_each(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each(void *arg)
-{
-	return test_for_each(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
+static int test_for_each_unlocked(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
 	struct dma_resv resv;
@@ -212,10 +187,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 		goto err_free;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
@@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 			r = -EINVAL;
 			goto err_iter_end;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) !=
-		    (usage >= DMA_RESV_USAGE_READ)) {
+		if (dma_resv_iter_usage(&cursor) != usage) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_iter_end;
@@ -262,18 +233,9 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_for_each_unlocked(void *arg)
-{
-	return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
-	return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_get_fences(void *arg, enum dma_resv_usage usage)
+static int test_get_fences(void *arg)
 {
+	enum dma_resv_usage usage = (unsigned long)arg;
 	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
 	int r, i;
@@ -296,10 +258,7 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
 		goto err_resv;
 	}
 
-	if (usage >= DMA_RESV_USAGE_READ)
-		dma_resv_add_shared_fence(&resv, f);
-	else
-		dma_resv_add_excl_fence(&resv, f);
+	dma_resv_add_fence(&resv, f, usage);
 	dma_resv_unlock(&resv);
 
 	r = dma_resv_get_fences(&resv, usage, &i, &fences);
@@ -324,30 +283,24 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
 	return r;
 }
 
-static int test_excl_get_fences(void *arg)
-{
-	return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_get_fences(void *arg)
-{
-	return test_get_fences(arg, DMA_RESV_USAGE_READ);
-}
-
 int dma_resv(void)
 {
 	static const struct subtest tests[] = {
 		SUBTEST(sanitycheck),
-		SUBTEST(test_excl_signaling),
-		SUBTEST(test_shared_signaling),
-		SUBTEST(test_excl_for_each),
-		SUBTEST(test_shared_for_each),
-		SUBTEST(test_excl_for_each_unlocked),
-		SUBTEST(test_shared_for_each_unlocked),
-		SUBTEST(test_excl_get_fences),
-		SUBTEST(test_shared_get_fences),
+		SUBTEST(test_signaling),
+		SUBTEST(test_for_each),
+		SUBTEST(test_for_each_unlocked),
+		SUBTEST(test_get_fences),
 	};
+	enum dma_resv_usage usage;
+	int r;
 
 	spin_lock_init(&fence_lock);
-	return subtests(tests, NULL);
+	for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+	     ++usage) {
+		r = subtests(tests, (void *)(unsigned long)usage);
+		if (r)
+			return r;
+	}
+	return 0;
 }
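
The harness now smuggles the usage through the untyped subtest argument, so each test runs once per usage value. The double cast is the usual idiom for stuffing an integer into a pointer without an int-to-pointer-size warning; a condensed sketch of the round trip:

/* Caller side: encode the enum as a pointer-sized integer. */
subtests(tests, (void *)(unsigned long)usage);

/* Test side: decode it again from the opaque argument. */
enum dma_resv_usage usage = (unsigned long)arg;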
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	 */
 	replacement = dma_fence_get_stub();
 	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
-				replacement);
+				replacement, DMA_RESV_USAGE_READ);
 	dma_fence_put(replacement);
 	return 0;
 }
4 changes: 2 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &bo->tbo;
-	/* One for TTM and one for the CS job */
-	p->uf_entry.tv.num_shared = 2;
+	/* One for TTM and two for the CS job */
+	p->uf_entry.tv.num_shared = 3;
 
 	drm_gem_object_put(gobj);
 
6 changes: 2 additions & 4 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1397,10 +1397,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		return;
 	}
 
-	if (shared)
-		dma_resv_add_shared_fence(resv, fence);
-	else
-		dma_resv_add_excl_fence(resv, fence);
+	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+			   DMA_RESV_USAGE_WRITE);
 }
 
 /**
10 changes: 3 additions & 7 deletions drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
+		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
 
-		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
-			dma_resv_add_excl_fence(obj->resv,
-						submit->out_fence);
-		else
-			dma_resv_add_shared_fence(obj->resv,
-						  submit->out_fence);
-
+		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
 		submit_unlock_object(submit, i);
 	}
 }
6 changes: 3 additions & 3 deletions drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -148,11 +148,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		if (dma_resv_iter_is_restarted(&cursor))
 			args->busy = 0;
 
-		if (dma_resv_iter_is_exclusive(&cursor))
-			/* Translate the exclusive fence to the READ *and* WRITE engine */
+		if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+			/* Translate the write fences to the READ *and* WRITE engine */
 			args->busy |= busy_check_writer(fence);
 		else
-			/* Translate shared fences to READ set of engines */
+			/* Translate read fences to READ set of engines */
 			args->busy |= busy_check_reader(fence);
 	}
 	dma_resv_iter_end(&cursor);
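
The <= comparison leans on the assumed enum ordering sketched earlier (usage values sorted from most to least exclusive), so anything at or below WRITE counts as a writer. A hedged sketch of the predicate, with a hypothetical helper:

/* fence_is_write() is illustrative only, not part of this patch. */
static inline bool fence_is_write(struct dma_resv_iter *cursor)
{
	return dma_resv_iter_usage(cursor) <= DMA_RESV_USAGE_WRITE;
}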
3 changes: 2 additions & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 				obj->base.resv, NULL, true,
 				i915_fence_timeout(i915),
 				I915_FENCE_GFP);
-	dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+	dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+			   DMA_RESV_USAGE_WRITE);
 	dma_fence_work_commit(&clflush->base);
 	/*
 	 * We must have successfully populated the pages(since we are
(diff for the remaining 22 changed files not shown)
