From 28f6b511457481a2132b27351730baa1367884c0 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 4 Feb 2008 22:28:03 -0800
Subject: [PATCH]

--- yaml ---
r: 83015
b: refs/heads/master
c: d1b5163206769aa93271bc1029e877ea9f920a5d
h: refs/heads/master
i:
  83013: 77d549d73abd18546c2fe5e463a2b148a2985c33
  83011: bfc6a158ef4edcf6f6754f3b482e5900f0aff9c1
  83007: 5550372f13b3f2e9671f09fc93dc6c9876d05446
v: v3
---
 [refs]                               | 2 +-
 trunk/drivers/parisc/ccio-dma.c      | 2 +-
 trunk/drivers/parisc/iommu-helpers.h | 7 ++++++-
 trunk/drivers/parisc/sba_iommu.c     | 2 +-
 4 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/[refs] b/[refs]
index 332d9d3673d8..0e4bfd3ad11f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fde6a3c82d67f592eb587be4d12222b0ae6d4321
+refs/heads/master: d1b5163206769aa93271bc1029e877ea9f920a5d
diff --git a/trunk/drivers/parisc/ccio-dma.c b/trunk/drivers/parisc/ccio-dma.c
index ca52307b8f40..d08b284de196 100644
--- a/trunk/drivers/parisc/ccio-dma.c
+++ b/trunk/drivers/parisc/ccio-dma.c
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
 
 	/*
 	** Program the I/O Pdir
diff --git a/trunk/drivers/parisc/iommu-helpers.h b/trunk/drivers/parisc/iommu-helpers.h
index 0a1f99a2e93e..97ba8286c596 100644
--- a/trunk/drivers/parisc/iommu-helpers.h
+++ b/trunk/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 */
 
 static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+		      struct scatterlist *startsg, int nents,
 		      int (*iommu_alloc_range)(struct ioc *, size_t))
 {
 	struct scatterlist *contig_sg;	   /* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
 
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
 			    IOVP_SIZE) > DMA_CHUNK_SIZE))
 			break;
 
+		if (startsg->length + dma_len > max_seg_size)
+			break;
+
 		/*
 		** Next see if we can append the next chunk (i.e.
 		** it must end on one page and begin on another
diff --git a/trunk/drivers/parisc/sba_iommu.c b/trunk/drivers/parisc/sba_iommu.c
index e527a0e1d6c0..d06627c3f353 100644
--- a/trunk/drivers/parisc/sba_iommu.c
+++ b/trunk/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
 
 	/*
 	** Program the I/O Pdir
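
Note (not part of the patch above): the new max_seg_size check in iommu_coalesce_chunks() only becomes restrictive when the device actually advertises a DMA segment size limit; otherwise dma_get_max_seg_size() falls back to its 64K default. The sketch below shows how a driver might publish such a limit so the check takes effect. It is a minimal illustration, not code from this patch: the names example_dev and example_probe and the 64 KiB value are made up, while dma_set_max_seg_size(), dma_get_max_seg_size() and struct device_dma_parameters are the real interfaces from <linux/dma-mapping.h> and <linux/device.h>.

/*
 * Hypothetical driver sketch: advertise a per-device DMA segment size
 * limit so that iommu_coalesce_chunks() stops merging scatterlist
 * entries once a coalesced chunk would exceed what the device accepts.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct example_dev {
	struct device_dma_parameters dma_parms;
	/* ... other driver state ... */
};

static int example_probe(struct device *dev, struct example_dev *edev)
{
	/*
	 * dma_set_max_seg_size() stores the limit in dev->dma_parms,
	 * so the driver (or its bus) must provide that storage first.
	 */
	dev->dma_parms = &edev->dma_parms;

	/* Assumed hardware limit of 64 KiB per DMA segment. */
	if (dma_set_max_seg_size(dev, 0x10000))
		return -EIO;

	/*
	 * From here on, dma_map_sg() -> ccio_map_sg()/sba_map_sg() ->
	 * iommu_coalesce_chunks() reads the limit back with
	 * dma_get_max_seg_size(dev) and breaks out of the coalescing
	 * loop before a merged segment would exceed it.
	 */
	return 0;
}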