Skip to content

Commit

Permalink
[PATCH] spufs: abstract priv1 register access.
Browse files Browse the repository at this point in the history
In a hypervisor based setup, direct access to the first
privileged register space can typically not be allowed
to the kernel and has to be implemented through hypervisor
calls.

As suggested by Masato Noguchi, let's abstract the register
access through a number of function calls. Since there is
currently no public specification of actual hypervisor
calls to implement this, I only provide a place that
makes it easier to hook into.

Cc: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Cc: Geoff Levand <geoff.levand@am.sony.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
  • Loading branch information
Arnd Bergmann authored and Paul Mackerras committed Jan 9, 2006
1 parent ce8ab85 commit f0831ac
Show file tree
Hide file tree
Showing 6 changed files with 233 additions and 135 deletions.
5 changes: 4 additions & 1 deletion arch/powerpc/platforms/cell/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@ obj-y += interrupt.o iommu.o setup.o spider-pic.o
obj-y += pervasive.o

obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SPU_FS) += spufs/ spu_base.o
obj-$(CONFIG_SPU_FS) += spufs/ spu-base.o

spu-base-y += spu_base.o spu_priv1.o

builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
obj-y += $(builtin-spufs-m)
51 changes: 18 additions & 33 deletions arch/powerpc/platforms/cell/spu_base.c
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,7 @@ static int __spu_trap_mailbox(struct spu *spu)

/* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock);
out_be64(&spu->priv1->int_mask_class2_RW,
in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
spu_int_mask_and(spu, 2, ~0x1);
spin_unlock(&spu->register_lock);
return 0;
}
Expand Down Expand Up @@ -180,8 +179,7 @@ static int __spu_trap_spubox(struct spu *spu)

/* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock);
out_be64(&spu->priv1->int_mask_class2_RW,
in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
spu_int_mask_and(spu, 2, ~0x10);
spin_unlock(&spu->register_lock);
return 0;
}
Expand All @@ -206,8 +204,8 @@ spu_irq_class_0_bottom(struct spu *spu)

spu->class_0_pending = 0;

mask = in_be64(&spu->priv1->int_mask_class0_RW);
stat = in_be64(&spu->priv1->int_stat_class0_RW);
mask = spu_int_mask_get(spu, 0);
stat = spu_int_stat_get(spu, 0);

stat &= mask;

Expand All @@ -220,7 +218,7 @@ spu_irq_class_0_bottom(struct spu *spu)
if (stat & 4) /* error on SPU */
__spu_trap_error(spu);

out_be64(&spu->priv1->int_stat_class0_RW, stat);
spu_int_stat_clear(spu, 0, stat);

return (stat & 0x7) ? -EIO : 0;
}
Expand All @@ -236,13 +234,13 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)

/* atomically read & clear class1 status. */
spin_lock(&spu->register_lock);
mask = in_be64(&spu->priv1->int_mask_class1_RW);
stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
dar = in_be64(&spu->priv1->mfc_dar_RW);
dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
mask = spu_int_mask_get(spu, 1);
stat = spu_int_stat_get(spu, 1) & mask;
dar = spu_mfc_dar_get(spu);
dsisr = spu_mfc_dsisr_get(spu);
if (stat & 2) /* mapping fault */
out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
out_be64(&spu->priv1->int_stat_class1_RW, stat);
spu_mfc_dsisr_set(spu, 0ul);
spu_int_stat_clear(spu, 1, stat);
spin_unlock(&spu->register_lock);

if (stat & 1) /* segment fault */
Expand Down Expand Up @@ -270,8 +268,8 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
unsigned long mask;

spu = data;
stat = in_be64(&spu->priv1->int_stat_class2_RW);
mask = in_be64(&spu->priv1->int_mask_class2_RW);
stat = spu_int_stat_get(spu, 2);
mask = spu_int_mask_get(spu, 2);

pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

Expand All @@ -292,7 +290,7 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
if (stat & 0x10) /* SPU mailbox threshold */
__spu_trap_spubox(spu);

out_be64(&spu->priv1->int_stat_class2_RW, stat);
spu_int_stat_clear(spu, 2, stat);
return stat ? IRQ_HANDLED : IRQ_NONE;
}

Expand All @@ -309,21 +307,18 @@ spu_request_irqs(struct spu *spu)
spu_irq_class_0, 0, spu->irq_c0, spu);
if (ret)
goto out;
out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_1, 0, spu->irq_c1, spu);
if (ret)
goto out1;
out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_2, 0, spu->irq_c2, spu);
if (ret)
goto out2;
out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
goto out;

out2:
Expand Down Expand Up @@ -383,13 +378,6 @@ static void spu_init_channels(struct spu *spu)
}
}

static void spu_init_regs(struct spu *spu)
{
out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

struct spu *spu_alloc(void)
{
struct spu *spu;
Expand All @@ -405,10 +393,8 @@ struct spu *spu_alloc(void)
}
up(&spu_mutex);

if (spu) {
if (spu)
spu_init_channels(spu);
spu_init_regs(spu);
}

return spu;
}
Expand Down Expand Up @@ -579,8 +565,7 @@ static int __init spu_map_device(struct spu *spu, struct device_node *spe)
goto out_unmap;

spu->priv1= map_spe_prop(spe, "priv1");
if (!spu->priv1)
goto out_unmap;
/* priv1 is not available on a hypervisor */

spu->priv2= map_spe_prop(spe, "priv2");
if (!spu->priv2)
Expand Down Expand Up @@ -633,8 +618,8 @@ static int __init create_spu(struct device_node *spe)
spu->dsisr = 0UL;
spin_lock_init(&spu->register_lock);

out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
spu_mfc_sr1_set(spu, 0x33);

spu->ibox_callback = NULL;
spu->wbox_callback = NULL;
Expand Down
133 changes: 133 additions & 0 deletions arch/powerpc/platforms/cell/spu_priv1.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/*
* access to SPU privileged registers
*/
#include <linux/module.h>

#include <asm/io.h>
#include <asm/spu.h>

/*
 * Clear bits in the class @class interrupt mask:
 * new mask = old mask & @mask.  Callers serialize concurrent
 * read-modify-write via spu->register_lock.
 */
void spu_int_mask_and(struct spu *spu, int class, u64 mask)
{
	out_be64(&spu->priv1->int_mask_RW[class],
		 in_be64(&spu->priv1->int_mask_RW[class]) & mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_and);

/*
 * Set bits in the class @class interrupt mask:
 * new mask = old mask | @mask.  Callers serialize concurrent
 * read-modify-write via spu->register_lock.
 */
void spu_int_mask_or(struct spu *spu, int class, u64 mask)
{
	out_be64(&spu->priv1->int_mask_RW[class],
		 in_be64(&spu->priv1->int_mask_RW[class]) | mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_or);

/* Replace the class @class interrupt mask register with @mask. */
void spu_int_mask_set(struct spu *spu, int class, u64 mask)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->int_mask_RW[class], mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_set);

u64 spu_int_mask_get(struct spu *spu, int class)
{
return in_be64(&spu->priv1->int_mask_RW[class]);
}
EXPORT_SYMBOL_GPL(spu_int_mask_get);

/*
 * Write @stat to the class @class interrupt status register;
 * callers use this to acknowledge the interrupt bits they handled.
 */
void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->int_stat_RW[class], stat);
}
EXPORT_SYMBOL_GPL(spu_int_stat_clear);

u64 spu_int_stat_get(struct spu *spu, int class)
{
return in_be64(&spu->priv1->int_stat_RW[class]);
}
EXPORT_SYMBOL_GPL(spu_int_stat_get);

/* Write the interrupt routing register. */
void spu_int_route_set(struct spu *spu, u64 route)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->int_route_RW, route);
}
EXPORT_SYMBOL_GPL(spu_int_route_set);

u64 spu_mfc_dar_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_dar_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_dar_get);

u64 spu_mfc_dsisr_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_dsisr_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);

/* Write the MFC DSISR register (callers write 0 to reset it). */
void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->mfc_dsisr_RW, dsisr);
}
EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);

/* Write the MFC SDR register (callers mirror SPRN_SDR1 into it). */
void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->mfc_sdr_RW, sdr);
}
EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);

/* Write the MFC SR1 register. */
void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->mfc_sr1_RW, sr1);
}
EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);

u64 spu_mfc_sr1_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_sr1_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);

/* Write the MFC transfer class ID register. */
void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->mfc_tclass_id_RW, tclass_id);
}
EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);

u64 spu_mfc_tclass_id_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_tclass_id_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);

void spu_tlb_invalidate(struct spu *spu)
{
out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
}
EXPORT_SYMBOL_GPL(spu_tlb_invalidate);

/* Write the resource allocation group ID register. */
void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->resource_allocation_groupID_RW, id);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);

u64 spu_resource_allocation_groupID_get(struct spu *spu)
{
return in_be64(&spu->priv1->resource_allocation_groupID_RW);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);

/* Write the resource allocation enable register. */
void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	out_be64(&priv1->resource_allocation_enable_RW, enable);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);

u64 spu_resource_allocation_enable_get(struct spu *spu)
{
return in_be64(&spu->priv1->resource_allocation_enable_RW);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
19 changes: 6 additions & 13 deletions arch/powerpc/platforms/cell/spufs/hw_ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,6 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
unsigned int events)
{
struct spu *spu = ctx->spu;
struct spu_priv1 __iomem *priv1 = spu->priv1;
int ret = 0;
u32 stat;

Expand All @@ -78,18 +77,16 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
if (stat & 0xff0000)
ret |= POLLIN | POLLRDNORM;
else {
out_be64(&priv1->int_stat_class2_RW, 0x1);
out_be64(&priv1->int_mask_class2_RW,
in_be64(&priv1->int_mask_class2_RW) | 0x1);
spu_int_stat_clear(spu, 2, 0x1);
spu_int_mask_or(spu, 2, 0x1);
}
}
if (events & (POLLOUT | POLLWRNORM)) {
if (stat & 0x00ff00)
ret = POLLOUT | POLLWRNORM;
else {
out_be64(&priv1->int_stat_class2_RW, 0x10);
out_be64(&priv1->int_mask_class2_RW,
in_be64(&priv1->int_mask_class2_RW) | 0x10);
spu_int_stat_clear(spu, 2, 0x10);
spu_int_mask_or(spu, 2, 0x10);
}
}
spin_unlock_irq(&spu->register_lock);
Expand All @@ -100,7 +97,6 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
{
struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem;
struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2;
int ret;

Expand All @@ -111,8 +107,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
ret = 4;
} else {
/* make sure we get woken up by the interrupt */
out_be64(&priv1->int_mask_class2_RW,
in_be64(&priv1->int_mask_class2_RW) | 0x1);
spu_int_mask_or(spu, 2, 0x1);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
Expand All @@ -123,7 +118,6 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem;
struct spu_priv1 __iomem *priv1 = spu->priv1;
int ret;

spin_lock_irq(&spu->register_lock);
Expand All @@ -134,8 +128,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
} else {
/* make sure we get woken up by the interrupt when space
becomes available */
out_be64(&priv1->int_mask_class2_RW,
in_be64(&priv1->int_mask_class2_RW) | 0x10);
spu_int_mask_or(spu, 2, 0x10);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
Expand Down
Loading

0 comments on commit f0831ac

Please sign in to comment.