[POWERPC] spusched: fix cpu/node binding
Add a cpus_allowed field to struct spu_context so that we always
use the cpu mask of the owning thread instead of the one happening to
call into the scheduler.  Also use this information in
grab_runnable_context to avoid spurious wakeups.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Authored by Christoph Hellwig and committed by Paul Mackerras on Jul 3, 2007.
Parent: 2cf2b3b. Commit: ea1ae59.
Showing 3 changed files with 52 additions and 22 deletions.
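Before the per-file diffs, a brief illustration of the rationale above: the node/SPU placement decision is not always made by the thread that owns the context; it can be made by the spusched kernel thread or by another task that happens to call into the scheduler, and at that point current->cpus_allowed describes the wrong task. The sketch below is illustrative only (simplified code against the 2.6.22-era cpumask API; the example_* names are invented for this example and are not part of the patch): the owner's CPU mask is captured once into the context, and the scheduler-side node check consults that snapshot instead of current.

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/topology.h>

/* Illustrative stand-in for the relevant part of struct spu_context. */
struct example_spu_ctx {
	cpumask_t cpus_allowed;		/* snapshot of the owning thread's mask */
};

/* Owner side: runs in the owning thread, so current is the right task here. */
static void example_capture_owner_mask(struct example_spu_ctx *ctx)
{
	ctx->cpus_allowed = current->cpus_allowed;
}

/*
 * Scheduler side: may run in the spusched kernel thread, whose own
 * current->cpus_allowed says nothing about the context's owner.
 */
static int example_node_allowed(struct example_spu_ctx *ctx, int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	/* old, buggy form: cpus_intersects(mask, current->cpus_allowed) */
	return cpus_intersects(mask, ctx->cpus_allowed);
}

The same snapshot is what lets grab_runnable_context() skip queued contexts that cannot run on the node whose SPU just became free, which is the "spurious wakeups" part of the description; the actual implementation is in the sched.c hunks below.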
arch/powerpc/platforms/cell/spufs/context.c (1 addition, 1 deletion)
@@ -53,7 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
-
+	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
 	goto out;
 out_free:
arch/powerpc/platforms/cell/spufs/sched.c (49 additions, 21 deletions)
@@ -112,6 +112,16 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	else
 		ctx->prio = current->static_prio;
 	ctx->policy = current->policy;
+
+	/*
+	 * A lot of places that don't hold active_mutex poke into
+	 * cpus_allowed, including grab_runnable_context which
+	 * already holds the runq_lock. So abuse runq_lock
+	 * to protect this field aswell.
+	 */
+	spin_lock(&spu_prio->runq_lock);
+	ctx->cpus_allowed = current->cpus_allowed;
+	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -123,16 +133,27 @@ void spu_update_sched_info(struct spu_context *ctx)
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-static inline int node_allowed(int node)
+static int __node_allowed(struct spu_context *ctx, int node)
 {
-	cpumask_t mask;
+	if (nr_cpus_node(node)) {
+		cpumask_t mask = node_to_cpumask(node);
 
-	if (!nr_cpus_node(node))
-		return 0;
-	mask = node_to_cpumask(node);
-	if (!cpus_intersects(mask, current->cpus_allowed))
-		return 0;
-	return 1;
+		if (cpus_intersects(mask, ctx->cpus_allowed))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+	int rval;
+
+	spin_lock(&spu_prio->runq_lock);
+	rval = __node_allowed(ctx, node);
+	spin_unlock(&spu_prio->runq_lock);
+
+	return rval;
 }
 
 /**
@@ -289,7 +310,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 		spu = spu_alloc_node(node);
 		if (spu)
@@ -321,7 +342,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	node = cpu_to_node(raw_smp_processor_id());
 	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 		node = (node < MAX_NUMNODES) ? node : 0;
-		if (!node_allowed(node))
+		if (!node_allowed(ctx, node))
 			continue;
 
 		mutex_lock(&spu_prio->active_mutex[node]);
@@ -416,23 +437,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  * Remove the highest priority context on the runqueue and return it
  * to the caller. Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-	struct spu_context *ctx = NULL;
+	struct spu_context *ctx;
 	int best;
 
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < prio) {
+	while (best < prio) {
 		struct list_head *rq = &spu_prio->runq[best];
 
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
+		list_for_each_entry(ctx, rq, rq) {
+			/* XXX(hch): check for affinity here aswell */
+			if (__node_allowed(ctx, node)) {
+				__spu_del_from_rq(ctx);
+				goto found;
+			}
+		}
+		best++;
 	}
+	ctx = NULL;
+ found:
 	spin_unlock(&spu_prio->runq_lock);
-
 	return ctx;
 }

@@ -442,7 +468,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	struct spu_context *new = NULL;
 
 	if (spu) {
-		new = grab_runnable_context(max_prio);
+		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
 			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
@@ -496,9 +522,11 @@ static void spusched_tick(struct spu_context *ctx)
 	 * tick and try again.
 	 */
 	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+		struct spu *spu = ctx->spu;
+		struct spu_context *new;
+
+		new = grab_runnable_context(ctx->prio + 1, spu->node);
 		if (new) {
-			struct spu *spu = ctx->spu;
 
 			__spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
arch/powerpc/platforms/cell/spufs/spufs.h (2 additions, 0 deletions)
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
+#include <linux/cpumask.h>
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
@@ -80,6 +81,7 @@ struct spu_context {
 	struct list_head rq;
 	unsigned int time_slice;
 	unsigned long sched_flags;
+	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
 };