Commit

Merge branch 'dma' of http://git.linaro.org/git/people/nico/linux into devel-stable
Russell King committed Jul 18, 2011
2 parents 4aa96cc + fb89fcf commit 07f1c29
Showing 55 changed files with 304 additions and 288 deletions.
193 changes: 83 additions & 110 deletions arch/arm/common/dmabounce.c
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
 	struct dmabounce_pool	large;
 
 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	if (!dev || !dev->archdata.dmabounce)
 		return NULL;
 	if (dma_mapping_error(dev, dma_addr)) {
-		if (dev)
-			dev_err(dev, "Trying to %s invalid mapping\n", where);
-		else
-			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		dev_err(dev, "Trying to %s invalid mapping\n", where);
 		return NULL;
 	}
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	dma_addr = virt_to_dma(dev, ptr);
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;
 
 	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
+		unsigned long limit, mask = *dev->dma_mask;
 
 		limit = (mask + 1) & ~mask;
 		if (limit && size > limit) {
 			dev_err(dev, "DMA mapping too big (requested %#x "
 				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
+			return -E2BIG;
 		}
 
-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
 	}
 
-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-		struct safe_buffer *buf;
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
 
-		buf = alloc_safe_buffer(device_info, ptr, size, dir);
-		if (buf == 0) {
-			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-			       __func__, ptr);
-			return ~0;
-		}
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	struct safe_buffer *buf;
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );
 
-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
-			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-				__func__, ptr, buf->safe, size);
-			memcpy(buf->safe, ptr, size);
-		}
-		ptr = buf->safe;
+	buf = alloc_safe_buffer(device_info, ptr, size, dir);
+	if (buf == NULL) {
+		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+		       __func__, ptr);
+		return ~0;
+	}
 
-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+			__func__, ptr, buf->safe, size);
+		memcpy(buf->safe, ptr, size);
 	}
 
-	return dma_addr;
+	return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+	BUG_ON(buf->size != size);
+	BUG_ON(buf->direction != dir);
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		DO_STATS(dev->archdata.dmabounce->bounce_count++);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			void *ptr = buf->ptr;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		void *ptr = buf->ptr;
 
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, ptr, size);
-			memcpy(ptr, buf->safe, size);
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe, ptr, size);
+		memcpy(ptr, buf->safe, size);
 
-			/*
-			 * Since we may have written to a page cache page,
-			 * we need to ensure that the data will be coherent
-			 * with user mappings.
-			 */
-			__cpuc_flush_dcache_area(ptr, size);
-		}
-		free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+		/*
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
		 * with user mappings.
+		 */
+		__cpuc_flush_dcache_area(ptr, size);
 	}
+	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, ptr, size, dir);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer. (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
-
-	unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(!valid_dma_direction(dir));
-
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}
+
 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			"is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}
 
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	struct safe_buffer *buf;
 
-	unmap_single(dev, dma_addr, size, dir);
+	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+		__func__, dma_addr, size, dir);
+
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}
+
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
 
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
 	device_info->total_allocs = 0;
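The practical effect of the dmabounce.c changes is that the bounce policy now travels with the device: dmabounce_register_dev() takes a needs_bounce callback, and the new needs_bounce() helper consults it after the generic DMA-mask checks. A minimal registration sketch under the new interface (hypothetical platform code; the 32MB window, pool sizes, and example_* identifiers are invented for illustration, not part of this commit):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

/* Illustrative policy: bounce any buffer extending past an assumed
 * 32MB bus window (the threshold is made up for this sketch). */
static int example_needs_bounce(struct device *dev, dma_addr_t addr,
				size_t size)
{
	return (addr + size) > SZ_32M;
}

static int example_platform_notify(struct device *dev)
{
	/* 2KB small-buffer pool, 4KB large-buffer pool, plus the
	 * per-device bounce policy introduced by this commit. */
	return dmabounce_register_dev(dev, 2048, 4096,
				      example_needs_bounce);
}

On such a platform the mapping path then follows the tri-state convention visible in __dma_map_page() above: a negative needs_bounce() result becomes a failed mapping (~0), zero maps the page directly with only cache maintenance, and one diverts the buffer through a safe-buffer pool.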
16 changes: 7 additions & 9 deletions arch/arm/common/it8152.c
@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= PHYS_OFFSET + SZ_64M - 1)
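To make the it8152 window arithmetic concrete, assume a PXA-style PHYS_OFFSET of 0xa0000000 (an illustrative value; the diff itself does not fix it), so the reachable 64MB window spans 0xa0000000..0xa3ffffff:

/* 4KB buffer at bus address 0xa0100000:
 *   0xa0100000 + 0x1000 - PHYS_OFFSET = 0x00101000 <  SZ_64M -> mapped directly
 *
 * 4KB buffer at bus address 0xa4000000:
 *   0xa4000000 + 0x1000 - PHYS_OFFSET = 0x04001000 >= SZ_64M -> bounced
 */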
60 changes: 31 additions & 29 deletions arch/arm/common/sa1111.c
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 
 	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
 #endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted.  Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset).  The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
+#endif
 
 static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	dev->dev.dma_mask = &dev->dma_mask;
 
 	if (dev->dma_mask != 0xffffffffUL) {
-		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+					     sa1111_needs_bounce);
 		if (ret) {
 			dev_err(&dev->dev, "SA1111: Failed to register"
 				" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	kfree(sachip);
 }
 
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted.  Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset).  The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
 	unsigned int skcr;
 	unsigned int skpcr;
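The sa1111_needs_bounce() test is conservative at the bank boundary: it flags any buffer whose end reaches the bank 1 base, not just buffers that actually occupy bank 1. Two illustrative probes on an Assabet-style layout, where SDRAM bank 1 begins at 0xc8000000 (values chosen for the example):

/* 4KB at 0xc7ffe000: ends at 0xc7ffefff, wholly in bank 0;
 *   addr + size = 0xc7fff000 <  0xc8000000 -> no bounce
 *
 * 4KB at 0xc7fff000: last byte is 0xc7ffffff, still bank 0, but
 *   addr + size = 0xc8000000 >= 0xc8000000 -> bounced anyway
 */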
