Merge master.kernel.org:/home/rmk/linux-2.6-arm
Linus Torvalds committed Oct 31, 2005
2 parents fc8e3d1 + 46595ff commit 1480d0a
Showing 27 changed files with 557 additions and 467 deletions.
165 changes: 86 additions & 79 deletions arch/arm/common/dmabounce.c
@@ -33,8 +33,8 @@
#include <asm/cacheflush.h>

#undef DEBUG

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
@@ -52,26 +52,31 @@ struct safe_buffer {
int direction;

/* safe buffer info */
struct dma_pool *pool;
struct dmabounce_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
unsigned long size;
struct dma_pool *pool;
#ifdef STATS
unsigned long allocs;
#endif
};

struct dmabounce_device_info {
struct list_head node;

struct device *dev;
struct dma_pool *small_buffer_pool;
struct dma_pool *large_buffer_pool;
struct list_head safe_buffers;
unsigned long small_buffer_size, large_buffer_size;
#ifdef STATS
unsigned long sbp_allocs;
unsigned long lbp_allocs;
unsigned long total_allocs;
unsigned long map_op_count;
unsigned long bounce_count;
#endif
struct dmabounce_pool small;
struct dmabounce_pool large;
};

static LIST_HEAD(dmabounce_devs);
@@ -82,9 +87,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
printk(KERN_INFO
"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
device_info->dev->bus_id,
device_info->sbp_allocs, device_info->lbp_allocs,
device_info->total_allocs - device_info->sbp_allocs -
device_info->lbp_allocs,
device_info->small.allocs, device_info->large.allocs,
device_info->total_allocs - device_info->small.allocs -
device_info->large.allocs,
device_info->total_allocs);
}
#endif
@@ -106,60 +111,58 @@ find_dmabounce_dev(struct device *dev)
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
size_t size, enum dma_data_direction dir)
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
struct dma_pool *pool;
struct dmabounce_pool *pool;
struct device *dev = device_info->dev;
void *safe;
dma_addr_t safe_dma_addr;

dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
__func__, ptr, size, dir);

DO_STATS ( device_info->total_allocs++ );
if (size <= device_info->small.size) {
pool = &device_info->small;
} else if (size <= device_info->large.size) {
pool = &device_info->large;
} else {
pool = NULL;
}

buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
if (buf == NULL) {
dev_warn(dev, "%s: kmalloc failed\n", __func__);
return NULL;
}

if (size <= device_info->small_buffer_size) {
pool = device_info->small_buffer_pool;
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

DO_STATS ( device_info->sbp_allocs++ );
} else if (size <= device_info->large_buffer_size) {
pool = device_info->large_buffer_pool;
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
buf->ptr = ptr;
buf->size = size;
buf->direction = dir;
buf->pool = pool;

DO_STATS ( device_info->lbp_allocs++ );
if (pool) {
buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
&buf->safe_dma_addr);
} else {
pool = NULL;
safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
GFP_ATOMIC);
}

if (safe == NULL) {
dev_warn(device_info->dev,
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
if (buf->safe == NULL) {
dev_warn(dev,
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
kfree(buf);
return NULL;
}

#ifdef STATS
if (pool)
pool->allocs++;
device_info->total_allocs++;
if (device_info->total_allocs % 1000 == 0)
print_alloc_stats(device_info);
#endif

buf->ptr = ptr;
buf->size = size;
buf->direction = dir;
buf->pool = pool;
buf->safe = safe;
buf->safe_dma_addr = safe_dma_addr;

list_add(&buf->node, &device_info->safe_buffers);

return buf;
@@ -186,7 +189,7 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
list_del(&buf->node);

if (buf->pool)
dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
else
dma_free_coherent(device_info->dev, buf->size, buf->safe,
buf->safe_dma_addr);
@@ -197,12 +200,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
/* ************************************************** */

#ifdef STATS

static void print_map_stats(struct dmabounce_device_info *device_info)
{
printk(KERN_INFO
"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
device_info->dev->bus_id,
dev_info(device_info->dev,
"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
device_info->map_op_count, device_info->bounce_count);
}
#endif
@@ -258,13 +259,13 @@ map_single(struct device *dev, void *ptr, size_t size,
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
consistent_sync(buf->safe, size, dir);
ptr = buf->safe;

dma_addr = buf->safe_dma_addr;
} else {
consistent_sync(ptr, size, dir);
}

consistent_sync(ptr, size, dir);

return dma_addr;
}

@@ -278,7 +279,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
/*
* Trying to unmap an invalid mapping
*/
if (dma_addr == ~0) {
if (dma_mapping_error(dma_addr)) {
dev_err(dev, "Trying to unmap invalid mapping\n");
return;
}
@@ -570,11 +571,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
local_irq_restore(flags);
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
unsigned long size)
{
pool->size = size;
DO_STATS(pool->allocs = 0);
pool->pool = dma_pool_create(name, dev, size,
0 /* byte alignment */,
0 /* no page-crossing issues */);

return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
unsigned long large_buffer_size)
{
struct dmabounce_device_info *device_info;
int ret;

device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
if (!device_info) {
@@ -584,45 +599,31 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
return -ENOMEM;
}

device_info->small_buffer_pool =
dma_pool_create("small_dmabounce_pool",
dev,
small_buffer_size,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (!device_info->small_buffer_pool) {
printk(KERN_ERR
"dmabounce: could not allocate small DMA pool for %s\n",
dev->bus_id);
kfree(device_info);
return -ENOMEM;
ret = dmabounce_init_pool(&device_info->small, dev,
"small_dmabounce_pool", small_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
small_buffer_size);
goto err_free;
}

if (large_buffer_size) {
device_info->large_buffer_pool =
dma_pool_create("large_dmabounce_pool",
dev,
large_buffer_size,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (!device_info->large_buffer_pool) {
printk(KERN_ERR
"dmabounce: could not allocate large DMA pool for %s\n",
dev->bus_id);
dma_pool_destroy(device_info->small_buffer_pool);

return -ENOMEM;
ret = dmabounce_init_pool(&device_info->large, dev,
"large_dmabounce_pool",
large_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
large_buffer_size);
goto err_destroy;
}
}

device_info->dev = dev;
device_info->small_buffer_size = small_buffer_size;
device_info->large_buffer_size = large_buffer_size;
INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
device_info->sbp_allocs = 0;
device_info->lbp_allocs = 0;
device_info->total_allocs = 0;
device_info->map_op_count = 0;
device_info->bounce_count = 0;
@@ -634,6 +635,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
dev->bus_id, dev->bus->name);

return 0;

err_destroy:
dma_pool_destroy(device_info->small.pool);
err_free:
kfree(device_info);
return ret;
}

void
@@ -655,10 +662,10 @@ dmabounce_unregister_dev(struct device *dev)
BUG();
}

if (device_info->small_buffer_pool)
dma_pool_destroy(device_info->small_buffer_pool);
if (device_info->large_buffer_pool)
dma_pool_destroy(device_info->large_buffer_pool);
if (device_info->small.pool)
dma_pool_destroy(device_info->small.pool);
if (device_info->large.pool)
dma_pool_destroy(device_info->large.pool);

#ifdef STATS
print_alloc_stats(device_info);
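
Note: the dmabounce.c changes above replace the two bare dma_pool pointers in struct dmabounce_device_info with a per-pool struct dmabounce_pool (pool, buffer size, and an allocation counter under STATS) and factor pool creation into dmabounce_init_pool(). A minimal sketch of how a bus or platform driver might register a device against this interface — the probe function, device pointer, and buffer sizes below are illustrative assumptions, not part of this commit:

	/* Hypothetical caller: enable bounce buffers for a device with
	 * restricted DMA reach.  The pool sizes (512 bytes / 4 KiB) are
	 * examples only. */
	static int example_probe(struct device *dev)
	{
		int ret;

		ret = dmabounce_register_dev(dev, 512, 4096);
		if (ret) {
			dev_err(dev, "dmabounce registration failed (%d)\n", ret);
			return ret;
		}

		/* ... remaining device setup; the matching teardown path
		 * would call dmabounce_unregister_dev(dev) ... */
		return 0;
	}
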
2 changes: 1 addition & 1 deletion arch/arm/configs/ixdp2400_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_NR_UARTS=1
# CONFIG_SERIAL_8250_EXTENDED is not set

#
2 changes: 1 addition & 1 deletion arch/arm/configs/ixdp2800_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_NR_UARTS=1
# CONFIG_SERIAL_8250_EXTENDED is not set

#
29 changes: 17 additions & 12 deletions arch/arm/kernel/traps.c
@@ -198,33 +198,38 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
barrier();
}

DEFINE_SPINLOCK(die_lock);

/*
* This function is protected against re-entrancy.
*/
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct task_struct *tsk = thread->task;
static int die_counter;

console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);

printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
print_modules();
__show_regs(regs);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
tsk->comm, tsk->pid, tsk->thread_info + 1);
tsk->comm, tsk->pid, thread + 1);

if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)tsk->thread_info);
dump_backtrace(regs, tsk);
dump_instr(regs);
}
}

DEFINE_SPINLOCK(die_lock);

/*
* This function is protected against re-entrancy.
*/
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
struct thread_info *thread = current_thread_info();

console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
__die(str, err, thread, regs);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
(diff for the remaining 23 changed files not shown)
