Skip to content

Commit

Permalink
Merge branch 'for-2.6.25' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

Browse files Browse the repository at this point in the history

* 'for-2.6.25' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Add arch-specific walk_memory_remove() for 64-bit powerpc
  [POWERPC] Enable hotplug memory remove for 64-bit powerpc
  [POWERPC] Add remove_memory() for 64-bit powerpc
  [POWERPC] Make cell IOMMU fixed mapping printk more useful
  [POWERPC] Fix potential cell IOMMU bug when switching back to default DMA ops
  [POWERPC] Don't enable cell IOMMU fixed mapping if there are no dma-ranges
  [POWERPC] Fix cell IOMMU null pointer explosion on old firmwares
  [POWERPC] spufs: Fix timing dependent false return from spufs_run_spu
  [POWERPC] spufs: No need to have a runnable SPU for libassist update
  [POWERPC] spufs: Update SPU_Status[CISHP] in backing runcntl write
  [POWERPC] spufs: Fix state_mutex leaks
  [POWERPC] Disable G5 NAP mode during SMU commands on U3
  • Loading branch information
Linus Torvalds committed Feb 8, 2008
2 parents f3aafa6 + a99824f commit dde0013
Show file tree
Hide file tree
Showing 12 changed files with 166 additions and 60 deletions.
6 changes: 6 additions & 0 deletions arch/powerpc/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,12 @@ config HOTPLUG_CPU
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y

config ARCH_HAS_WALK_MEMORY
def_bool y

config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y

config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on (PPC_PRPMC2800 || PPC_MULTIPLATFORM) && EXPERIMENTAL
Expand Down
33 changes: 33 additions & 0 deletions arch/powerpc/mm/mem.c
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,39 @@ int __devinit arch_add_memory(int nid, u64 start, u64 size)
return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * remove_memory - hot-remove a range of physical memory
 * @start: physical start address of the range, in bytes
 * @size:  length of the range, in bytes
 *
 * Converts the byte range into a [start_pfn, end_pfn) page-frame range
 * and asks the core VM to offline those pages.  The third argument is
 * 120 * HZ — presumably a jiffies timeout for the offline operation;
 * confirm against the offline_pages() prototype.
 *
 * Returns 0 on success, or the error value from offline_pages().
 */
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret;

	/* Byte addresses -> page frame numbers (end is exclusive). */
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
	if (ret)
		goto out;
	/* Arch-specific calls go here - next patch */
out:
	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() requires the range it is handed to contain no
 * holes.  On 64-bit powerpc the range originates from /sysfs, so it is
 * already known to be valid, non-overlapping and hole-free: by the time
 * a memory add or remove reaches this point, /proc/device-tree has been
 * updated and is correct.  Cross-checking against the device tree would
 * only become necessary if a range could arrive straight from user
 * space (via a syscall/ioctl) rather than through the /sysfs
 * offline/online path.  Hence it is sufficient to invoke the callback
 * exactly once, covering the whole range.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
		int (*func)(unsigned long, unsigned long, void *))
{
	return func(start_pfn, nr_pages, arg);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
Expand Down
48 changes: 28 additions & 20 deletions arch/powerpc/platforms/cell/iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include <asm/prom.h>
Expand Down Expand Up @@ -789,18 +790,16 @@ static int __init cell_iommu_init_disabled(void)
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
struct device_node *tmp, *np;
struct device_node *np;
const u32 *ranges = NULL;
int i, len, best;

np = dev->archdata.of_node;
of_node_get(np);
ranges = of_get_property(np, "dma-ranges", &len);
while (!ranges && np) {
tmp = of_get_parent(np);
of_node_put(np);
np = tmp;
np = of_node_get(dev->archdata.of_node);
while (np) {
ranges = of_get_property(np, "dma-ranges", &len);
if (ranges)
break;
np = of_get_next_parent(np);
}

if (!ranges) {
Expand Down Expand Up @@ -842,19 +841,18 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;

if (dma_mask == DMA_BIT_MASK(64)) {
if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR)
dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n");
else {
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
set_dma_ops(dev, &dma_iommu_fixed_ops);
cell_dma_dev_setup(dev);
}
if (dma_mask == DMA_BIT_MASK(64) &&
cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
{
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
set_dma_ops(dev, &dma_iommu_fixed_ops);
} else {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
set_dma_ops(dev, get_pci_dma_ops());
}

cell_dma_dev_setup(dev);

*dev->dma_mask = dma_mask;

return 0;
Expand Down Expand Up @@ -918,6 +916,18 @@ static int __init cell_iommu_fixed_mapping_init(void)
return -1;
}

/* We must have dma-ranges properties for fixed mapping to work */
for (np = NULL; (np = of_find_all_nodes(np));) {
if (of_find_property(np, "dma-ranges", NULL))
break;
}
of_node_put(np);

if (!np) {
pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
return -1;
}

/* The default setup is to have the fixed mapping sit after the
* dynamic region, so find the top of the largest IOMMU window
* on any axon, then add the size of RAM and that's our max value.
Expand Down Expand Up @@ -981,8 +991,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
dsize = htab_size_bytes;
}

pr_debug("iommu: setting up %d, dynamic window %lx-%lx " \
"fixed window %lx-%lx\n", iommu->nid, dbase,
printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
dbase + dsize, fbase, fbase + fsize);

cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
Expand All @@ -998,8 +1008,6 @@ static int __init cell_iommu_fixed_mapping_init(void)
dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
set_pci_dma_ops(&dma_iommu_ops);

printk(KERN_DEBUG "IOMMU fixed mapping established.\n");

return 0;
}

Expand Down
6 changes: 6 additions & 0 deletions arch/powerpc/platforms/cell/spufs/backing_ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,12 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
spin_lock(&ctx->csa.register_lock);
ctx->csa.prob.spu_runcntl_RW = val;
if (val & SPU_RUNCNTL_RUNNABLE) {
ctx->csa.prob.spu_status_R &=
~SPU_STATUS_STOPPED_BY_STOP &
~SPU_STATUS_STOPPED_BY_HALT &
~SPU_STATUS_SINGLE_STEP &
~SPU_STATUS_INVALID_INSTR &
~SPU_STATUS_INVALID_CH;
ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
} else {
ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
Expand Down
12 changes: 4 additions & 8 deletions arch/powerpc/platforms/cell/spufs/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx)
u64 ea, dsisr, access;
unsigned long flags;
unsigned flt = 0;
int ret, ret2;
int ret;

/*
* dar and dsisr get passed from the registers
Expand Down Expand Up @@ -148,13 +148,10 @@ int spufs_handle_class1(struct spu_context *ctx)
ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

/*
* If spu_acquire fails due to a pending signal we just want to return
* EINTR to userspace even if that means missing the dma restart or
* updating the page fault statistics.
* This is nasty: we need the state_mutex for all the bookkeeping even
* if the syscall was interrupted by a signal. ewww.
*/
ret2 = spu_acquire(ctx);
if (ret2)
goto out;
mutex_lock(&ctx->state_mutex);

/*
* Clear dsisr under ctxt lock after handling the fault, so that
Expand Down Expand Up @@ -185,7 +182,6 @@ int spufs_handle_class1(struct spu_context *ctx)
} else
spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

out:
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
return ret;
}
49 changes: 29 additions & 20 deletions arch/powerpc/platforms/cell/spufs/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -358,6 +358,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
{
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long area, offset = address - vma->vm_start;
int ret = 0;

spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

Expand All @@ -379,7 +380,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
if (ctx->state == SPU_STATE_SAVED) {
up_read(&current->mm->mmap_sem);
spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
down_read(&current->mm->mmap_sem);
} else {
Expand All @@ -388,7 +389,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
}

spu_release(ctx);
if (!ret)
spu_release(ctx);
return NOPFN_REFAULT;
}

Expand Down Expand Up @@ -755,23 +757,25 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,

count = spu_acquire(ctx);
if (count)
return count;
goto out;

/* wait only for the first element */
count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_ibox_read(ctx, &ibox_data))
if (!spu_ibox_read(ctx, &ibox_data)) {
count = -EAGAIN;
goto out_unlock;
}
} else {
count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
if (count)
goto out;
}
if (count)
goto out;

/* if we can't write at all, return -EFAULT */
count = __put_user(ibox_data, udata);
if (count)
goto out;
goto out_unlock;

for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
int ret;
Expand All @@ -788,9 +792,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
break;
}

out:
out_unlock:
spu_release(ctx);

out:
return count;
}

Expand Down Expand Up @@ -905,22 +909,24 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,

count = spu_acquire(ctx);
if (count)
return count;
goto out;

/*
* make sure we can at least write one element, by waiting
* in case of !O_NONBLOCK
*/
count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_wbox_write(ctx, wbox_data))
if (!spu_wbox_write(ctx, wbox_data)) {
count = -EAGAIN;
goto out_unlock;
}
} else {
count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
if (count)
goto out;
}

if (count)
goto out;

/* write as much as possible */
for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
Expand All @@ -934,8 +940,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
break;
}

out:
out_unlock:
spu_release(ctx);
out:
return count;
}

Expand Down Expand Up @@ -1598,12 +1605,11 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
} else {
ret = spufs_wait(ctx->mfc_wq,
spufs_read_mfc_tagstatus(ctx, &status));
if (ret)
goto out;
}
spu_release(ctx);

if (ret)
goto out;

ret = 4;
if (copy_to_user(buffer, &status, 4))
ret = -EFAULT;
Expand Down Expand Up @@ -1732,6 +1738,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
int status;
ret = spufs_wait(ctx->mfc_wq,
spu_send_mfc_command(ctx, cmd, &status));
if (ret)
goto out;
if (status)
ret = status;
}
Expand Down Expand Up @@ -1785,7 +1793,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)

ret = spu_acquire(ctx);
if (ret)
return ret;
goto out;
#if 0
/* this currently hangs */
ret = spufs_wait(ctx->mfc_wq,
Expand All @@ -1794,12 +1802,13 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
goto out;
ret = spufs_wait(ctx->mfc_wq,
ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
if (ret)
goto out;
#else
ret = 0;
#endif
spu_release(ctx);

out:
return ret;
}

Expand Down
Loading

0 comments on commit dde0013

Please sign in to comment.