Skip to content

Commit

Permalink
Merge tag 'drm-misc-fixes-2025-03-20' of ssh://gitlab.freedesktop.org…
Browse files Browse the repository at this point in the history
…/drm/misc/kernel into drm-fixes

A sched fence reference leak fix, two fence fixes for v3d, two overflow
fixes for qaic, and an iommu handling fix for host1x.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250320-valiant-outstanding-nightingale-e9acae@houat
  • Loading branch information
Dave Airlie committed Mar 21, 2025
2 parents 4701f33 + cb83f4b commit d273872
Show file tree
Hide file tree
Showing 4 changed files with 42 additions and 7 deletions.
9 changes: 7 additions & 2 deletions drivers/accel/qaic/qaic_data.c
Original file line number Diff line number Diff line change
Expand Up @@ -172,16 +172,19 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
unsigned int len, nents, offf, offl;
struct sg_table *sgt;
size_t total_len;
int ret, j;

/* find out number of relevant nents needed for this mem */
total_len = 0;
sgf = NULL;
sgl = NULL;
nents = 0;
offf = 0;
offl = 0;

size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
Expand Down Expand Up @@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
u64 total;
int i;

for (i = 0; i < count; i++) {
Expand All @@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;

if (slice_ent[i].offset + slice_ent[i].size > total_size)
if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
total > total_size)
return -EINVAL;
}

Expand Down
11 changes: 9 additions & 2 deletions drivers/gpu/drm/scheduler/sched_entity.c
Original file line number Diff line number Diff line change
Expand Up @@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;

dma_fence_get(&s_fence->finished);
if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
drm_sched_entity_kill_jobs_cb))
if (!prev ||
dma_fence_add_callback(prev, &job->finish_cb,
drm_sched_entity_kill_jobs_cb)) {
/*
* Adding callback above failed.
* dma_fence_put() checks for NULL.
*/
dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
}

prev = &s_fence->finished;
}
Expand Down
23 changes: 20 additions & 3 deletions drivers/gpu/drm/v3d/v3d_sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;

if (unlikely(job->base.base.s_fence->finished.error))
if (unlikely(job->base.base.s_fence->finished.error)) {
spin_lock_irqsave(&v3d->job_lock, irqflags);
v3d->bin_job = NULL;
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
}

/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
Expand Down Expand Up @@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;

if (unlikely(job->base.base.s_fence->finished.error))
if (unlikely(job->base.base.s_fence->finished.error)) {
v3d->render_job = NULL;
return NULL;
}

v3d->render_job = job;

Expand Down Expand Up @@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;

if (unlikely(job->base.base.s_fence->finished.error)) {
v3d->tfu_job = NULL;
return NULL;
}

v3d->tfu_job = job;

fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;

v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
Expand Down Expand Up @@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;

if (unlikely(job->base.base.s_fence->finished.error)) {
v3d->csd_job = NULL;
return NULL;
}

v3d->csd_job = job;

v3d_invalidate_caches(v3d);
Expand Down
6 changes: 6 additions & 0 deletions drivers/gpu/host1x/dev.c
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}

/*
* Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
* valid paging domain.
*/
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
Expand All @@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;

Expand Down

0 comments on commit d273872

Please sign in to comment.