Commit

---
yaml
---
r: 17232
b: refs/heads/master
c: ce8ab85
h: refs/heads/master
v: v3
Arnd Bergmann authored and Paul Mackerras committed Jan 9, 2006
1 parent ab047a8 commit a536c2b
Showing 5 changed files with 161 additions and 154 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8837d9216f99048636fbb2c11347358e99e06181
+refs/heads/master: ce8ab8541203f6c7be5b2eeaa97f14f1d8d44e4f
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o switch.o syscalls.o
-spufs-y += sched.o backing_ops.o hw_ops.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o
 
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS := spu-
152 changes: 0 additions & 152 deletions trunk/arch/powerpc/platforms/cell/spufs/file.c
@@ -304,34 +304,6 @@ static struct file_operations spufs_mbox_stat_fops = {
.read = spufs_mbox_stat_read,
};

/*
* spufs_wait
* Same as wait_event_interruptible(), except that here
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*/

#define spufs_wait(wq, condition) \
({ \
int __ret = 0; \
DEFINE_WAIT(__wait); \
for (;;) { \
prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
if (condition) \
break; \
if (!signal_pending(current)) { \
spu_release(ctx); \
schedule(); \
spu_acquire(ctx); \
continue; \
} \
__ret = -ERESTARTSYS; \
break; \
} \
finish_wait(&(wq), &__wait); \
__ret; \
})

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
@@ -529,130 +501,6 @@ static struct file_operations spufs_wbox_stat_fops = {
.read = spufs_wbox_stat_read,
};

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;

wake_up_all(&ctx->stop_wq);
}

static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
{
struct spu *spu;
u64 pte_fault;

*stat = ctx->ops->status_read(ctx);
if (ctx->state != SPU_STATE_RUNNABLE)
return 1;
spu = ctx->spu;
pte_fault = spu->dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
}

static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
u32 * status)
{
int ret;

if ((ret = spu_acquire_runnable(ctx)) != 0)
return ret;
ctx->ops->npc_write(ctx, *npc);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
return 0;
}

static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
u32 * status)
{
int ret = 0;

*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);
spu_release(ctx);

if (signal_pending(current))
ret = -ERESTARTSYS;
if (unlikely(current->ptrace & PT_PTRACED)) {
if ((*status & SPU_STATUS_STOPPED_BY_STOP)
&& (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
force_sig(SIGTRAP, current);
ret = -ERESTARTSYS;
}
}
return ret;
}

static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
u32 *status)
{
int ret;

if ((ret = spu_run_fini(ctx, npc, status)) != 0)
return ret;
if (*status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)) {
return *status;
}
if ((ret = spu_run_init(ctx, npc, status)) != 0)
return ret;
return 0;
}

static inline int spu_process_events(struct spu_context *ctx)
{
struct spu *spu = ctx->spu;
u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
int ret = 0;

if (spu->dsisr & pte_fault)
ret = spu_irq_class_1_bottom(spu);
if (spu->class_0_pending)
ret = spu_irq_class_0_bottom(spu);
if (!ret && signal_pending(current))
ret = -ERESTARTSYS;
return ret;
}

long spufs_run_spu(struct file *file, struct spu_context *ctx,
u32 * npc, u32 * status)
{
int ret;

if (down_interruptible(&ctx->run_sema))
return -ERESTARTSYS;

ret = spu_run_init(ctx, npc, status);
if (ret)
goto out;

do {
ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
if (unlikely(ret))
break;
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, status);
if (ret)
goto out;
continue;
}
ret = spu_process_events(ctx);

} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)));

ctx->ops->runcntl_stop(ctx);
ret = spu_run_fini(ctx, npc, status);
if (!ret)
ret = *status;
spu_yield(ctx);

out:
up(&ctx->run_sema);
return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
131 changes: 131 additions & 0 deletions trunk/arch/powerpc/platforms/cell/spufs/run.c
@@ -0,0 +1,131 @@
#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;

wake_up_all(&ctx->stop_wq);
}

static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
{
struct spu *spu;
u64 pte_fault;

*stat = ctx->ops->status_read(ctx);
if (ctx->state != SPU_STATE_RUNNABLE)
return 1;
spu = ctx->spu;
pte_fault = spu->dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
}

static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
u32 * status)
{
int ret;

if ((ret = spu_acquire_runnable(ctx)) != 0)
return ret;
ctx->ops->npc_write(ctx, *npc);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
return 0;
}

static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
u32 * status)
{
int ret = 0;

*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);
spu_release(ctx);

if (signal_pending(current))
ret = -ERESTARTSYS;
if (unlikely(current->ptrace & PT_PTRACED)) {
if ((*status & SPU_STATUS_STOPPED_BY_STOP)
&& (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
force_sig(SIGTRAP, current);
ret = -ERESTARTSYS;
}
}
return ret;
}

static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
u32 *status)
{
int ret;

if ((ret = spu_run_fini(ctx, npc, status)) != 0)
return ret;
if (*status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)) {
return *status;
}
if ((ret = spu_run_init(ctx, npc, status)) != 0)
return ret;
return 0;
}

static inline int spu_process_events(struct spu_context *ctx)
{
struct spu *spu = ctx->spu;
u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
int ret = 0;

if (spu->dsisr & pte_fault)
ret = spu_irq_class_1_bottom(spu);
if (spu->class_0_pending)
ret = spu_irq_class_0_bottom(spu);
if (!ret && signal_pending(current))
ret = -ERESTARTSYS;
return ret;
}

long spufs_run_spu(struct file *file, struct spu_context *ctx,
u32 * npc, u32 * status)
{
int ret;

if (down_interruptible(&ctx->run_sema))
return -ERESTARTSYS;

ret = spu_run_init(ctx, npc, status);
if (ret)
goto out;

do {
ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
if (unlikely(ret))
break;
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, status);
if (ret)
goto out;
continue;
}
ret = spu_process_events(ctx);

} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)));

ctx->ops->runcntl_stop(ctx);
ret = spu_run_fini(ctx, npc, status);
if (!ret)
ret = *status;
spu_yield(ctx);

out:
up(&ctx->run_sema);
return ret;
}
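The function above is the run loop behind the spu_run system call (note syscalls.o in the Makefile). As a rough user-space illustration only, and not part of this diff, it could be driven as sketched below; the "/spu/example" context path, the skipped program-load step, and the availability of the __NR_spu_create/__NR_spu_run numbers on the running powerpc kernel are assumptions of the sketch.

/*
 * Illustrative sketch, not part of this commit: exercising the kernel
 * loop in spufs_run_spu() from user space via the spu_create/spu_run
 * system calls on powerpc. The spufs path "/spu/example" and the
 * omitted program-load step are assumptions made for the example.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned int npc = 0, status = 0;
	long ret;
	int fd;

	/* Create an SPU context; spu_create() returns a spufs directory fd. */
	fd = syscall(__NR_spu_create, "/spu/example", 0, 0755);
	if (fd < 0) {
		perror("spu_create");
		return 1;
	}

	/* ... load an SPU program into the context's "mem" file here ... */

	/* Run from npc until the SPU stops or a signal arrives; on success
	 * the syscall returns the SPU status word and updates npc. */
	ret = syscall(__NR_spu_run, fd, &npc, &status);
	if (ret < 0)
		perror("spu_run");
	else
		printf("SPU stopped: status 0x%lx, npc 0x%x\n", ret, npc);

	close(fd);
	return 0;
}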

28 changes: 28 additions & 0 deletions trunk/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -124,6 +124,34 @@ void spu_yield(struct spu_context *ctx);
int __init spu_sched_init(void);
void __exit spu_sched_exit(void);

/*
* spufs_wait
* Same as wait_event_interruptible(), except that here
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*/

#define spufs_wait(wq, condition) \
({ \
int __ret = 0; \
DEFINE_WAIT(__wait); \
for (;;) { \
prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
if (condition) \
break; \
if (!signal_pending(current)) { \
spu_release(ctx); \
schedule(); \
spu_acquire(ctx); \
continue; \
} \
__ret = -ERESTARTSYS; \
break; \
} \
finish_wait(&(wq), &__wait); \
__ret; \
})

size_t spu_wbox_write(struct spu_context *ctx, u32 data);
size_t spu_ibox_read(struct spu_context *ctx, u32 *data);

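For reference, the calling convention the new spufs_wait() macro expects: the context must already be acquired, since the macro releases it around schedule() and retakes it before re-checking the condition, so it is still held when the macro returns. A minimal kernel-side sketch follows; data_available() is a purely hypothetical predicate, not something added by this commit.

/*
 * Illustrative sketch only, not part of this commit. data_available()
 * is a placeholder predicate re-evaluated after every wakeup on
 * ctx->stop_wq. The caller holds the context on entry; spufs_wait()
 * drops and retakes it around the sleep.
 */
#include "spufs.h"

static int example_wait(struct spu_context *ctx)
{
	int ret;

	spu_acquire(ctx);
	ret = spufs_wait(ctx->stop_wq, data_available(ctx));
	/* ret is 0 once the condition became true, or -ERESTARTSYS if a
	 * signal interrupted the wait; either way ctx is still acquired. */
	spu_release(ctx);

	return ret;
}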
