dma-mapping: trace more error paths
It can be surprising to the user if DMA functions are only traced on
success. On failure, it can be unclear what the source of the problem
is. Fix this by tracing all functions even when they fail. Cases where
we BUG/WARN are skipped, since those should be sufficiently noisy
already.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Sean Anderson authored and Christoph Hellwig committed Oct 29, 2024
Parent: c4484ab, commit: 68b6dbf
Showing 2 changed files with 54 additions and 7 deletions.
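For context on how these events are consumed: they are ordinary tracepoints in the "dma" group, so they can be enabled through tracefs like any other event. The sketch below is illustrative only and not part of this commit; it assumes tracefs is mounted at /sys/kernel/tracing and trims error handling.

/* Enable all "dma" trace events (including the new dma_map_sg_err and
 * dma_alloc_sgt_err) and stream the trace buffer to stdout. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/events/dma/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1)
		return 1;
	close(fd);

	/* With this commit, failed mappings and allocations show up
	 * here as well, not just successful ones. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	return 0;
}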
include/trace/events/dma.h (36 additions, 0 deletions)

@@ -156,6 +156,7 @@ DEFINE_EVENT(dma_alloc_class, name, \

DEFINE_ALLOC_EVENT(dma_alloc);
DEFINE_ALLOC_EVENT(dma_alloc_pages);
+DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);

TRACE_EVENT(dma_alloc_sgt,
	TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
@@ -320,6 +321,41 @@ TRACE_EVENT(dma_map_sg,
		decode_dma_attrs(__entry->attrs))
);

+TRACE_EVENT(dma_map_sg_err,
+	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+		 int err, enum dma_data_direction dir, unsigned long attrs),
+	TP_ARGS(dev, sgl, nents, err, dir, attrs),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(dev))
+		__dynamic_array(u64, phys_addrs, nents)
+		__field(int, err)
+		__field(enum dma_data_direction, dir)
+		__field(unsigned long, attrs)
+	),
+
+	TP_fast_assign(
+		struct scatterlist *sg;
+		int i;
+
+		__assign_str(device);
+		for_each_sg(sgl, sg, nents, i)
+			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+		__entry->err = err;
+		__entry->dir = dir;
+		__entry->attrs = attrs;
+	),
+
+	TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
+		__get_str(device),
+		decode_dma_data_direction(__entry->dir),
+		__print_array(__get_dynamic_array(phys_addrs),
+			      __get_dynamic_array_len(phys_addrs) /
+				sizeof(u64), sizeof(u64)),
+		__entry->err,
+		decode_dma_attrs(__entry->attrs))
+);
+
TRACE_EVENT(dma_unmap_sg,
	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
		 enum dma_data_direction dir, unsigned long attrs),
kernel/dma/mapping.c (18 additions, 7 deletions)

@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
+		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
		return -EIO;
	}

@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
+		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
+				DMA_BIDIRECTIONAL, flag, attrs);
		return cpu_addr;
+	}

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

-	if (dma_alloc_direct(dev, ops))
+	if (dma_alloc_direct(dev, ops)) {
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
-	else if (use_dma_iommu(dev))
+	} else if (use_dma_iommu(dev)) {
		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
-	else if (ops->alloc)
+	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	else
+	} else {
+		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
+				attrs);
		return NULL;
+	}

	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
			flag, attrs);
@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	 */
	WARN_ON(irqs_disabled());

-	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
-		       attrs);
	if (!cpu_addr)
		return;

+	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
+		       attrs);
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
				      size, dir, gfp, 0);
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+	} else {
+		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
	}
	return page;
}
@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		sgt->nents = 1;
		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+	} else {
+		trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
	}
	return sgt;
}
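As a caller-side illustration of the effect: before this commit, a failed coherent allocation left no trace record at all; after it, every return path of dma_alloc_attrs() emits a dma_alloc event, with a NULL virtual address on failure. The snippet below is hypothetical (the helper name and error handling are made up) and is not part of this commit:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver helper, for illustration only. With this commit,
 * the failing dma_alloc_attrs() call below shows up in the trace buffer
 * as a dma_alloc event with a NULL virtual address, so the failed
 * attempt can be correlated with the device, size, and attrs. */
static void *example_get_buffer(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	void *cpu_addr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);

	if (!cpu_addr)
		dev_err(dev, "coherent alloc of %zu bytes failed\n", size);
	return cpu_addr;
}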
