Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 81149
b: refs/heads/master
c: e65c2f6
h: refs/heads/master
i:
  81147: bcba801
v: v3
  • Loading branch information
Luke Browning authored and Paul Mackerras committed Dec 21, 2007
1 parent deaae60 commit d575e26
Show file tree
Hide file tree
Showing 6 changed files with 201 additions and 169 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 9476141c185aa131fa8b4b6ccc5c0ccf92300225
refs/heads/master: e65c2f6fcebb9af0c3f53c796aff730dd657f5e7
31 changes: 0 additions & 31 deletions trunk/arch/powerpc/platforms/cell/spufs/context.c
Original file line number Diff line number Diff line change
Expand Up @@ -133,37 +133,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_runnable - lock spu context and make sure it is runnable
 * @ctx: spu context to lock
 * @flags: activation flags handed through to spu_activate()
 *
 * Note:
 * Returns 0 and with the context locked on success
 * Returns negative error and with the context _unlocked_ on failure.
 */
int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
	int err;

	spu_acquire(ctx);

	/* Already loaded on an spu: nothing more to do, keep the lock. */
	if (ctx->state != SPU_STATE_SAVED)
		return 0;

	/*
	 * An ownerless context is about to be freed, so it cannot be
	 * acquired anymore.
	 */
	if (!ctx->owner) {
		spu_release(ctx);
		return -EINVAL;
	}

	err = spu_activate(ctx, flags);
	if (err) {
		spu_release(ctx);
		return err;
	}

	return 0;
}

/**
* spu_acquire_saved - lock spu context and make sure it is in saved state
* @ctx: spu context to lock
Expand Down
9 changes: 9 additions & 0 deletions trunk/arch/powerpc/platforms/cell/spufs/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -572,6 +572,9 @@ void spufs_ibox_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;

if (!ctx)
return;

wake_up_all(&ctx->ibox_wq);
kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
Expand Down Expand Up @@ -708,6 +711,9 @@ void spufs_wbox_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;

if (!ctx)
return;

wake_up_all(&ctx->wbox_wq);
kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
Expand Down Expand Up @@ -1339,6 +1345,9 @@ void spufs_mfc_callback(struct spu *spu)
{
struct spu_context *ctx = spu->ctx;

if (!ctx)
return;

wake_up_all(&ctx->mfc_wq);

pr_debug("%s %s\n", __FUNCTION__, spu->name);
Expand Down
90 changes: 36 additions & 54 deletions trunk/arch/powerpc/platforms/cell/spufs/run.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,21 +41,29 @@ void spufs_stop_callback(struct spu *spu)
spu->dar = 0;
}

/*
 * spu_stopped - check whether the context has stopped executing.
 * @ctx:  spu context to test
 * @stat: out parameter, receives the raw status word read from the spu
 *
 * Returns 1 when the context should be treated as stopped: a scheduler
 * notification is pending, the status word carries one of the stop bits,
 * a DSISR page-fault/access-denied condition is recorded in the saved
 * context area, or a class 0 exception is pending.  Returns 0 otherwise.
 *
 * NOTE(review): this span contained the old and new diff versions of the
 * function interleaved (duplicate declarations and an unreachable second
 * return); the body below is the reconstructed post-commit version.
 */
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	*stat = ctx->ops->status_read(ctx);

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (*stat & stopped)
		return 1;

	dsisr = ctx->csa.dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

static int spu_setup_isolated(struct spu_context *ctx)
Expand Down Expand Up @@ -151,24 +159,27 @@ static int spu_setup_isolated(struct spu_context *ctx)

static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
unsigned long runcntl;
unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
int ret;

spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

if (ctx->flags & SPU_CREATE_ISOLATE) {
/*
* Force activation of spu. Isolated state assumes that
* special loader context is loaded and running on spu.
*/
/*
* NOSCHED is synchronous scheduling with respect to the caller.
* The caller waits for the context to be loaded.
*/
if (ctx->flags & SPU_CREATE_NOSCHED) {
if (ctx->state == SPU_STATE_SAVED) {
spu_set_timeslice(ctx);

ret = spu_activate(ctx, 0);
if (ret)
return ret;
}
}

/*
* Apply special setup as required.
*/
if (ctx->flags & SPU_CREATE_ISOLATE) {
if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
ret = spu_setup_isolated(ctx);
if (ret)
Expand All @@ -183,31 +194,30 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
if (runcntl == 0)
runcntl = SPU_RUNCNTL_RUNNABLE;
}

if (ctx->flags & SPU_CREATE_NOSCHED) {
spuctx_switch_state(ctx, SPU_UTIL_USER);
ctx->ops->runcntl_write(ctx, runcntl);

} else {
unsigned long privcntl;

if (test_thread_flag(TIF_SINGLESTEP))
privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
else
privcntl = SPU_PRIVCNTL_MODE_NORMAL;
runcntl = SPU_RUNCNTL_RUNNABLE;

ctx->ops->npc_write(ctx, *npc);
ctx->ops->privcntl_write(ctx, privcntl);
ctx->ops->runcntl_write(ctx, runcntl);

if (ctx->state == SPU_STATE_SAVED) {
spu_set_timeslice(ctx);
ret = spu_activate(ctx, 0);
if (ret)
return ret;
} else {
spuctx_switch_state(ctx, SPU_UTIL_USER);
}

spuctx_switch_state(ctx, SPU_UTIL_USER);
ctx->ops->runcntl_write(ctx, runcntl);
}

return 0;
Expand All @@ -218,6 +228,8 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
{
int ret = 0;

spu_del_from_rq(ctx);

*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);

Expand All @@ -230,26 +242,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
return ret;
}

/*
 * spu_reacquire_runnable - finish the current run segment and re-acquire
 * the context in runnable state.
 * @ctx:    spu context being run
 * @npc:    out parameter, receives the next program counter
 * @status: out parameter, receives the spu status word
 *
 * Returns 0 with the context re-acquired, a negative error from
 * spu_run_fini()/spu_acquire_runnable(), or the raw status word when a
 * stop or halt condition ended the run.
 */
static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
		u32 *status)
{
	int rc;

	rc = spu_run_fini(ctx, npc, status);
	if (rc)
		return rc;

	/* A stop/halt condition terminates the run loop; hand back status. */
	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
		return *status;

	rc = spu_acquire_runnable(ctx, 0);
	if (rc)
		return rc;

	spuctx_switch_state(ctx, SPU_UTIL_USER);

	return 0;
}

/*
* SPU syscall restarting is tricky because we violate the basic
* assumption that the signal handler is running on the interrupted
Expand Down Expand Up @@ -386,17 +378,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
if (ret)
break;

if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
if (ret)
goto out2;
continue;
}

if (signal_pending(current))
ret = -ERESTARTSYS;


} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT |
SPU_STATUS_SINGLE_STEP)));
Expand All @@ -411,7 +394,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);

out2:
if ((ret == 0) ||
((ret == -ERESTARTSYS) &&
((status & SPU_STATUS_STOPPED_BY_HALT) ||
Expand Down
Loading

0 comments on commit d575e26

Please sign in to comment.