---
yaml
---
r: 258177
b: refs/heads/master
c: b6fcd31
h: refs/heads/master
i:
  258175: b298378
v: v3
Russell King committed Jun 28, 2011
1 parent 46ca232 commit 0ccadd7
Showing 90 changed files with 516 additions and 1,093 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 022ae537b23cb14a391565e9ad9e9945f4b17138
refs/heads/master: b6fcd313c9db4f4c4feaf9d5d48b293d5fa27061
9 changes: 1 addition & 8 deletions trunk/Documentation/usb/error-codes.txt
@@ -76,13 +76,6 @@ A transfer's actual_length may be positive even when an error has been
reported. That's because transfers often involve several packets, so that
one or more packets could finish before an error stops further endpoint I/O.

For isochronous URBs, the urb status value is non-zero only if the URB is
unlinked, the device is removed, the host controller is disabled, or the total
transferred length is less than the requested length and the URB_SHORT_NOT_OK
flag is set. Completion handlers for isochronous URBs should only see
urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
Individual frame descriptor status fields may report more status codes.


0 Transfer completed successfully

@@ -139,7 +132,7 @@ Individual frame descriptor status fields may report more status codes.
device removal events immediately.

-EXDEV ISO transfer only partially completed
(only set in iso_frame_desc[n].status, not urb->status)
look at individual frame status for details

-EINVAL ISO madness, if this happens: Log off and go home

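As an aside on the error-codes.txt hunk above: both the paragraph and the -EXDEV entry touched here say that a partially completed isochronous transfer is reported per frame, not in urb->status. A minimal completion-handler sketch of that rule; the handler name and the resubmit-on-success policy are illustrative, not part of this commit:

#include <linux/usb.h>

/* Illustrative only: for isochronous URBs, urb->status is normally 0,
 * -ENOENT, -ECONNRESET, -ESHUTDOWN or -EREMOTEIO; a partial completion
 * (-EXDEV) shows up only in the per-frame iso_frame_desc[n].status. */
static void example_iso_complete(struct urb *urb)
{
	int i;

	if (urb->status)
		return;		/* URB unlinked or device gone; do not resubmit */

	for (i = 0; i < urb->number_of_packets; i++) {
		if (urb->iso_frame_desc[i].status)
			pr_debug("frame %d failed: %d\n",
				 i, urb->iso_frame_desc[i].status);
		else
			pr_debug("frame %d transferred %u bytes\n",
				 i, urb->iso_frame_desc[i].actual_length);
	}

	if (usb_submit_urb(urb, GFP_ATOMIC))
		pr_err("example: resubmit failed\n");
}
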
12 changes: 2 additions & 10 deletions trunk/MAINTAINERS
@@ -6434,9 +6434,8 @@ S: Maintained
F: drivers/usb/misc/rio500*

USB EHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
S: Maintained
S: Orphan
F: Documentation/usb/ehci.txt
F: drivers/usb/host/ehci*

@@ -6466,12 +6465,6 @@ S: Maintained
F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/

USB/IP DRIVERS
M: Matt Mooney <mfm@muteddisk.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/staging/usbip/

USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
L: linux-usb@vger.kernel.org
@@ -6501,9 +6494,8 @@ S: Maintained
F: sound/usb/midi.*

USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
S: Maintained
S: Orphan
F: Documentation/usb/ohci.txt
F: drivers/usb/host/ohci*

193 changes: 110 additions & 83 deletions trunk/arch/arm/common/dmabounce.c
@@ -79,8 +79,6 @@ struct dmabounce_device_info {
struct dmabounce_pool large;

rwlock_t lock;

int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
@@ -212,91 +210,114 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
if (!dev || !dev->archdata.dmabounce)
return NULL;
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Trying to %s invalid mapping\n", where);
if (dev)
dev_err(dev, "Trying to %s invalid mapping\n", where);
else
pr_err("unknown device: Trying to %s invalid mapping\n", where);
return NULL;
}
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
if (!dev || !dev->archdata.dmabounce)
return 0;
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dma_addr_t dma_addr;
int needs_bounce = 0;

if (device_info)
DO_STATS ( device_info->map_op_count++ );

dma_addr = virt_to_dma(dev, ptr);

if (dev->dma_mask) {
unsigned long limit, mask = *dev->dma_mask;
unsigned long mask = *dev->dma_mask;
unsigned long limit;

limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
return -E2BIG;
return ~0;
}

/* Figure out if we need to bounce from the DMA mask. */
if ((dma_addr | (dma_addr + size - 1)) & ~mask)
return 1;
/*
* Figure out if we need to bounce from the DMA mask.
*/
needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
}

return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
struct safe_buffer *buf;
if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
struct safe_buffer *buf;

if (device_info)
DO_STATS ( device_info->map_op_count++ );
buf = alloc_safe_buffer(device_info, ptr, size, dir);
if (buf == 0) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return 0;
}

buf = alloc_safe_buffer(device_info, ptr, size, dir);
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return ~0;
}
dev_dbg(dev,
"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);

dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
if ((dir == DMA_TO_DEVICE) ||
(dir == DMA_BIDIRECTIONAL)) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
ptr = buf->safe;

if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
dma_addr = buf->safe_dma_addr;
} else {
/*
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
__dma_single_cpu_to_dev(ptr, size, dir);
}

return buf->safe_dma_addr;
return dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);
struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);

DO_STATS(dev->archdata.dmabounce->bounce_count++);
dev_dbg(dev,
"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);

if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
void *ptr = buf->ptr;
DO_STATS(dev->archdata.dmabounce->bounce_count++);

dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe, ptr, size);
memcpy(ptr, buf->safe, size);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
void *ptr = buf->ptr;

/*
* Since we may have written to a page cache page,
* we need to ensure that the data will be coherent
* with user mappings.
*/
__cpuc_flush_dcache_area(ptr, size);
dev_dbg(dev,
"%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe, ptr, size);
memcpy(ptr, buf->safe, size);

/*
* Since we may have written to a page cache page,
* we need to ensure that the data will be coherent
* with user mappings.
*/
__cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
} else {
__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */
@@ -307,28 +328,45 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, dir);

BUG_ON(!valid_dma_direction(dir));

return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(__dma_map_single);

/*
* see if a mapped address was really a "safe" buffer and if so, copy
* the data from the safe buffer back to the unsafe buffer and free up
* the safe buffer. (basically return things back to the way they
* should be)
*/
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);

unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_single);

dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dma_addr_t dma_addr;
int ret;

dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
__func__, page, offset, size, dir);

dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
return ~0;

if (ret == 0) {
__dma_page_cpu_to_dev(page, offset, size, dir);
return dma_addr;
}
BUG_ON(!valid_dma_direction(dir));

if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
"is not supported\n");
return ~0;
}

@@ -345,19 +383,10 @@ EXPORT_SYMBOL(__dma_map_page);
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
struct safe_buffer *buf;

dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);

buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
dma_addr & ~PAGE_MASK, size, dir);
return;
}
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, dir);

unmap_single(dev, buf, size, dir);
unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

@@ -432,8 +461,7 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
unsigned long large_buffer_size,
int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
unsigned long large_buffer_size)
{
struct dmabounce_device_info *device_info;
int ret;
@@ -469,7 +497,6 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
rwlock_init(&device_info->lock);
device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
device_info->total_allocs = 0;
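The bounce decision in both variants of map_single()/needs_bounce() above is pure mask arithmetic: a buffer must be bounced when any byte of [dma_addr, dma_addr + size - 1] lies outside the device's DMA mask. A standalone, user-space sketch of that check with made-up addresses; the helper name and the example mask are assumptions, not taken from the commit:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;

/* Mirrors the check "(dma_addr | (dma_addr + size - 1)) & ~mask":
 * non-zero means some address in the buffer falls outside the mask. */
static int example_needs_bounce(dma_addr_t dma_addr, uint32_t size, uint32_t mask)
{
	return ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;
}

int main(void)
{
	uint32_t mask = 0x03ffffff;	/* a device limited to the first 64MB */

	/* 4KB buffer well inside the window: no bounce needed (prints 0) */
	printf("%d\n", example_needs_bounce(0x00100000, 4096, mask));

	/* 8KB buffer straddling the 64MB boundary: must bounce (prints 1) */
	printf("%d\n", example_needs_bounce(0x03fff000, 8192, mask));
	return 0;
}
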
16 changes: 9 additions & 7 deletions trunk/arch/arm/common/it8152.c
@@ -243,12 +243,6 @@ static struct resource it8152_mem = {
* ITE8152 chip can address up to 64MByte, so all the devices
* connected to ITE8152 (PCI and USB) should have limited DMA window
*/
static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
__func__, dma_addr, size);
return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
}

/*
* Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -260,7 +254,7 @@ static int it8152_pci_platform_notify(struct device *dev)
if (dev->dma_mask)
*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
dmabounce_register_dev(dev, 2048, 4096);
}
return 0;
}
@@ -273,6 +267,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
return 0;
}

int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
__func__, dma_addr, size);
return (dev->bus == &pci_bus_type) &&
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= PHYS_OFFSET + SZ_64M - 1)
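For the it8152 case above, once it8152_pci_platform_notify() has registered a PCI device with dmabounce_register_dev(), drivers keep using the ordinary streaming DMA API and the bouncing stays invisible to them. A hypothetical driver-side sketch; the function and the surrounding hardware programming are illustrative, not part of this commit:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical consumer: on a device registered with dmabounce_register_dev(),
 * this ordinary streaming mapping goes through the __dma_map_single()/
 * __dma_map_page() hooks shown in the dmabounce.c diff, which substitute a
 * bounce buffer whenever the address fails the DMA-mask check. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the hardware and wait for the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}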