dma-buf: fix and rework dma_buf_poll v7
Daniel pointed me towards this function and there are multiple obvious problems
in the implementation.

First of all, the retry loop is not working as intended. In general, the
retry only makes sense if you grab the reference first and then check the
sequence values.
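
As a minimal sketch (illustration only, not part of this patch, assuming the
current dma_resv layout with its seqcount and RCU-protected exclusive fence),
the ordering a correct retry would need is:

        rcu_read_lock();
retry:
        seq = read_seqcount_begin(&resv->seq);
        fence = rcu_dereference(resv->fence_excl);
        if (fence && !dma_fence_get_rcu(fence))
                fence = NULL;           /* grab the reference first ... */
        if (read_seqcount_retry(&resv->seq, seq)) {
                dma_fence_put(fence);   /* ... and only then validate the snapshot */
                goto retry;
        }
        rcu_read_unlock();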

Then we should also always wait for the exclusive fence.

It's also good practice to keep the reference around when installing callbacks
to fences you don't own.
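
The new helpers below follow exactly that pattern; in isolation it looks like
this (the reference is dropped again when the fence has already signaled, and
otherwise later in dma_buf_poll_cb()):

        dma_fence_get(fence);
        r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
        if (!r)
                return true;    /* callback installed, put in dma_buf_poll_cb() */
        dma_fence_put(fence);   /* already signaled, drop the reference again */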

And last, the whole implementation was unnecessarily complex and rather hard
to understand, which could lead to unexpected behavior of the IOCTL.

Fix all this by reworking the implementation from scratch, dropping the
whole RCU approach and taking the lock instead.
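
The resulting shape of dma_buf_poll() is then simply (see the diff below):

        dma_resv_lock(resv, NULL);
        /* snapshot the fences and install the callbacks, no retry needed */
        dma_resv_unlock(resv);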

Only mildly tested and needs a thoughtful review of the code.

Pushing through drm-misc-next to avoid merge conflicts and give the code
another round of testing.

v2: fix the reference counting as well
v3: keep the excl fence handling as is for stable
v4: back to testing all fences, drop RCU
v5: handle in and out separately
v6: add missing clear of events
v7: change coding style as suggested by Michel, drop unused variables

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Michel Dänzer <mdaenzer@redhat.com>
CC: stable@vger.kernel.org
Link: https://patchwork.freedesktop.org/patch/msgid/20210720131110.88512-1-christian.koenig@amd.com
Christian König committed Oct 1, 2021
1 parent 241ffeb commit 6b51b02
Showing 2 changed files with 71 additions and 83 deletions.
drivers/dma-buf/dma-buf.c: 70 additions, 82 deletions

@@ -74,7 +74,7 @@ static void dma_buf_release(struct dentry *dentry)
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operation to the buffer.
         */
-       BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+       BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
        dma_buf_stats_teardown(dmabuf);
        dmabuf->ops->release(dmabuf);
@@ -206,16 +206,55 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
+       dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+                               struct dma_buf_poll_cb_t *dcb)
+{
+       struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+       struct dma_fence *fence;
+       int i, r;
+
+       if (!fobj)
+               return false;
+
+       for (i = 0; i < fobj->shared_count; ++i) {
+               fence = rcu_dereference_protected(fobj->shared[i],
+                                                 dma_resv_held(resv));
+               dma_fence_get(fence);
+               r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+               if (!r)
+                       return true;
+               dma_fence_put(fence);
+       }
+
+       return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+                             struct dma_buf_poll_cb_t *dcb)
+{
+       struct dma_fence *fence = dma_resv_excl_fence(resv);
+       int r;
+
+       if (!fence)
+               return false;
+
+       dma_fence_get(fence);
+       r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+       if (!r)
+               return true;
+       dma_fence_put(fence);
+
+       return false;
 }
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
        struct dma_buf *dmabuf;
        struct dma_resv *resv;
-       struct dma_resv_list *fobj;
-       struct dma_fence *fence_excl;
        __poll_t events;
-       unsigned shared_count, seq;
 
        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
@@ -229,101 +268,50 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
        if (!events)
                return 0;
 
-retry:
-       seq = read_seqcount_begin(&resv->seq);
-       rcu_read_lock();
-
-       fobj = rcu_dereference(resv->fence);
-       if (fobj)
-               shared_count = fobj->shared_count;
-       else
-               shared_count = 0;
-       fence_excl = dma_resv_excl_fence(resv);
-       if (read_seqcount_retry(&resv->seq, seq)) {
-               rcu_read_unlock();
-               goto retry;
-       }
-
-       if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-               __poll_t pevents = EPOLLIN;
+       dma_resv_lock(resv, NULL);
 
-               if (shared_count == 0)
-                       pevents |= EPOLLOUT;
+       if (events & EPOLLOUT) {
+               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 
+               /* Check that callback isn't busy */
                spin_lock_irq(&dmabuf->poll.lock);
-               if (dcb->active) {
-                       dcb->active |= pevents;
-                       events &= ~pevents;
-               } else
-                       dcb->active = pevents;
+               if (dcb->active)
+                       events &= ~EPOLLOUT;
+               else
+                       dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);
 
-               if (events & pevents) {
-                       if (!dma_fence_get_rcu(fence_excl)) {
-                               /* force a recheck */
-                               events &= ~pevents;
-                               dma_buf_poll_cb(NULL, &dcb->cb);
-                       } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-                                                          dma_buf_poll_cb)) {
-                               events &= ~pevents;
-                               dma_fence_put(fence_excl);
-                       } else {
-                               /*
-                                * No callback queued, wake up any additional
-                                * waiters.
-                                */
-                               dma_fence_put(fence_excl);
+               if (events & EPOLLOUT) {
+                       if (!dma_buf_poll_shared(resv, dcb) &&
+                           !dma_buf_poll_excl(resv, dcb))
+                               /* No callback queued, wake up any other waiters */
                                dma_buf_poll_cb(NULL, &dcb->cb);
-                       }
+                       else
+                               events &= ~EPOLLOUT;
                }
        }
 
-       if ((events & EPOLLOUT) && shared_count > 0) {
-               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-               int i;
+       if (events & EPOLLIN) {
+               struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 
-               /* Only queue a new callback if no event has fired yet */
+               /* Check that callback isn't busy */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
-                       events &= ~EPOLLOUT;
+                       events &= ~EPOLLIN;
                else
-                       dcb->active = EPOLLOUT;
+                       dcb->active = EPOLLIN;
                spin_unlock_irq(&dmabuf->poll.lock);
 
-               if (!(events & EPOLLOUT))
-                       goto out;
-
-               for (i = 0; i < shared_count; ++i) {
-                       struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-                       if (!dma_fence_get_rcu(fence)) {
-                               /*
-                                * fence refcount dropped to zero, this means
-                                * that fobj has been freed
-                                *
-                                * call dma_buf_poll_cb and force a recheck!
-                                */
-                               events &= ~EPOLLOUT;
+               if (events & EPOLLIN) {
+                       if (!dma_buf_poll_excl(resv, dcb))
+                               /* No callback queued, wake up any other waiters */
                                dma_buf_poll_cb(NULL, &dcb->cb);
-                               break;
-                       }
-                       if (!dma_fence_add_callback(fence, &dcb->cb,
-                                                   dma_buf_poll_cb)) {
-                               dma_fence_put(fence);
-                               events &= ~EPOLLOUT;
-                               break;
-                       }
-                       dma_fence_put(fence);
+                       else
+                               events &= ~EPOLLIN;
                }
        }
 
-       /* No callback queued, wake up any additional waiters. */
-       if (i == shared_count)
-               dma_buf_poll_cb(NULL, &dcb->cb);
-
-out:
-       rcu_read_unlock();
+       dma_resv_unlock(resv);
        return events;
 }

@@ -566,8 +554,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        dmabuf->owner = exp_info->owner;
        spin_lock_init(&dmabuf->name_lock);
        init_waitqueue_head(&dmabuf->poll);
-       dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-       dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+       dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+       dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
        if (!resv) {
                resv = (struct dma_resv *)&dmabuf[1];
include/linux/dma-buf.h: 1 addition, 1 deletion

@@ -440,7 +440,7 @@ struct dma_buf {
                wait_queue_head_t *poll;
 
                __poll_t active;
-       } cb_excl, cb_shared;
+       } cb_in, cb_out;
 #ifdef CONFIG_DMABUF_SYSFS_STATS
        /**
         * @sysfs_entry:
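
For context (a hypothetical userspace sketch, not part of this commit): these
callbacks back poll(2)/epoll on the dma-buf fd, where POLLOUT waits for all
fences (buffer idle enough to write) and POLLIN only for the exclusive fence
(safe to read):

        #include <errno.h>
        #include <poll.h>

        /* Block until the dma-buf is idle for the requested access; pass
         * POLLOUT to wait for all fences, POLLIN for the exclusive fence only.
         */
        static int dma_buf_wait_idle(int dmabuf_fd, short events)
        {
                struct pollfd pfd = { .fd = dmabuf_fd, .events = events };
                int ret;

                do {
                        ret = poll(&pfd, 1, -1);        /* no timeout */
                } while (ret < 0 && errno == EINTR);

                return ret < 0 ? -errno : 0;
        }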
