
Commit

---
yaml
---
r: 138896
b: refs/heads/master
c: f98eee8
h: refs/heads/master
v: v3
FUJITA Tomonori authored and Ingo Molnar committed Jan 6, 2009
1 parent 91c00e4 commit 3b1fc0e
Showing 5 changed files with 26 additions and 78 deletions.
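This commit replaces the exported pointer-based swiotlb_map_single{,_attrs}()/swiotlb_unmap_single{,_attrs}() entry points with page-based swiotlb_map_page()/swiotlb_unmap_page() in lib/swiotlb.c, matching the struct dma_map_ops callbacks, and drops the duplicated static wrappers from the ia64 and x86 swiotlb glue. As orientation only (not part of the commit), a minimal sketch of how an old pointer-based call site maps onto the new page-based entry point, assuming a lowmem buffer so virt_to_page()/offset_in_page() apply; example_map_buffer is a hypothetical helper:

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

/* Illustrative shim: old virt-pointer call expressed with the new API. */
static dma_addr_t example_map_buffer(struct device *dev, void *ptr,
				     size_t size, enum dma_data_direction dir)
{
	/* old: return swiotlb_map_single_attrs(dev, ptr, size, dir, NULL); */
	return swiotlb_map_page(dev, virt_to_page(ptr),
				offset_in_page(ptr), size, dir, NULL);
}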
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 160c1d8e40866edfeae7d68816b7005d70acf391
refs/heads/master: f98eee8ea99fe74ee9c4e867ba178ec3072793be
16 changes: 0 additions & 16 deletions trunk/arch/ia64/kernel/pci-swiotlb.c
@@ -16,22 +16,6 @@ EXPORT_SYMBOL(swiotlb);
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly;

static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return swiotlb_map_single_attrs(dev, page_address(page) + offset, size,
dir, attrs);
}

static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
swiotlb_unmap_single_attrs(dev, dma_handle, size, dir, attrs);
}

struct dma_map_ops swiotlb_dma_ops = {
.alloc_coherent = swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
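The visible part of this hunk stops at the top of swiotlb_dma_ops; the rest is collapsed. As a hedged sketch (the exact field list is an assumption, not shown in the diff), the effect of the commit is that such a dma_map_ops table can point .map_page/.unmap_page at the helpers now exported by lib/swiotlb.c instead of local static wrappers:

/* Sketch only; other callbacks omitted, field list assumed. */
struct dma_map_ops swiotlb_dma_ops = {
	.alloc_coherent	= swiotlb_alloc_coherent,
	.free_coherent	= swiotlb_free_coherent,
	.map_page	= swiotlb_map_page,
	.unmap_page	= swiotlb_unmap_page,
	/* .map_sg, .unmap_sg, sync callbacks, etc. omitted */
};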
17 changes: 0 additions & 17 deletions trunk/arch/x86/kernel/pci-swiotlb_64.c
@@ -38,23 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
return 0;
}

/* these will be moved to lib/swiotlb.c later on */

static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return swiotlb_map_single(dev, page_address(page) + offset, size, dir);
}

static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
swiotlb_unmap_single(dev, dma_handle, size, dir);
}

static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
21 changes: 7 additions & 14 deletions trunk/include/linux/swiotlb.h
@@ -41,20 +41,13 @@ extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir);

extern void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir);

extern dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
int dir, struct dma_attrs *attrs);

extern void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, struct dma_attrs *attrs);
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);

extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
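With the single-buffer prototypes removed from swiotlb.h, callers see only the dma_map_ops-shaped pair declared above. A minimal, hedged usage sketch through the generic DMA API, which dispatches to these swiotlb entry points when bounce buffering is in effect; the helper name is hypothetical:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

/* Drivers go through dma_map_page(), not swiotlb_map_page() directly. */
static int example_dma_to_device(struct device *dev, struct page *page,
				 size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device to read "len" bytes from "handle" ... */

	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}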
48 changes: 18 additions & 30 deletions trunk/lib/swiotlb.c
@@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
int dir, struct dma_attrs *attrs)
{
dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
void *ptr = page_address(page) + offset;
dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
void *map;

BUG_ON(dir == DMA_NONE);
@@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (!address_needs_mapping(hwdev, dev_addr, size) &&
if (!address_needs_mapping(dev, dev_addr, size) &&
!range_needs_mapping(ptr, size))
return dev_addr;

/*
* Oh well, have to allocate and map a bounce buffer.
*/
map = map_single(hwdev, virt_to_phys(ptr), size, dir);
map = map_single(dev, phys, size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
swiotlb_full(dev, size, dir, 1);
map = io_tlb_overflow_buffer;
}

dev_addr = swiotlb_virt_to_bus(hwdev, map);
dev_addr = swiotlb_virt_to_bus(dev, map);

/*
* Ensure that the address returned is DMA'ble
*/
if (address_needs_mapping(hwdev, dev_addr, size))
if (address_needs_mapping(dev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");

return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);

dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single);
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, struct dma_attrs *attrs)
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
char *dma_addr = swiotlb_bus_to_virt(dev_addr);

@@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);

void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
int dir)
{
return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
* Make physical memory consistent for a single streaming mode DMA translation
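To tie together the ownership rule stated in the comments above (the device owns the mapping from map until unmap or a sync is performed), a hedged sketch of the intended ordering using the helpers added here plus the existing swiotlb_sync_single_for_cpu(); the receive-buffer scenario and helper name are illustrative:

/* Ownership sketch for a DMA_FROM_DEVICE (receive) buffer. */
static void example_rx_cycle(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, 0, len, DMA_FROM_DEVICE, NULL);
	/* Device owns the buffer: it may DMA into it; the CPU must not read it. */

	/* ... wait for the device to signal completion ... */

	swiotlb_sync_single_for_cpu(dev, dev_addr, len, DMA_FROM_DEVICE);
	/* The CPU may now inspect the data without unmapping. */

	swiotlb_unmap_page(dev, dev_addr, len, DMA_FROM_DEVICE, NULL);
	/* CPU owns the buffer again; reads see whatever the device wrote. */
}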
