Skip to content

Commit

Permalink
[CELL] spufs: rework list management and associated locking
Browse files Browse the repository at this point in the history
This sorts out the various lists and related locks in the spu code.

In detail:

 - the per-node free_spus and active_list are gone.  Instead, struct spu
   gained an alloc_state member telling whether the spu is free or not
 - the per-node spus array is now locked by a per-node mutex, which
   takes over from the global spu_lock and the per-node active_mutex
 - the spu_alloc* and spu_free functions are gone as the state change is
   now done inline in the spufs code.  This allows more sharing of
   code between the affinity and normal cases and more efficient locking
 - some little refactoring in the affinity code for this locking scheme

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
  • Loading branch information
Christoph Hellwig authored and Arnd Bergmann committed Jul 20, 2007
1 parent 1474855 commit 486acd4
Show file tree
Hide file tree
Showing 3 changed files with 112 additions and 169 deletions.
72 changes: 7 additions & 65 deletions arch/powerpc/platforms/cell/spu_base.c
Original file line number Diff line number Diff line change
Expand Up @@ -409,7 +409,7 @@ static void spu_free_irqs(struct spu *spu)
free_irq(spu->irqs[2], spu);
}

static void spu_init_channels(struct spu *spu)
void spu_init_channels(struct spu *spu)
{
static const struct {
unsigned channel;
Expand Down Expand Up @@ -442,66 +442,7 @@ static void spu_init_channels(struct spu *spu)
out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
}
}

/*
 * Allocate one specific SPU, identified by req_spu, if it is still free.
 *
 * Walks the free_spus list of req_spu's node under the global spu_lock;
 * on a match the SPU is unlinked from the free list and its channels are
 * reinitialized.  Returns req_spu on success, or NULL if that SPU has
 * already been claimed by someone else.
 */
struct spu *spu_alloc_spu(struct spu *req_spu)
{
	struct spu *spu, *ret = NULL;

	spin_lock(&spu_lock);
	list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
		if (spu == req_spu) {
			/* Unlink from the free list: the SPU now belongs
			 * to the caller. */
			list_del_init(&spu->list);
			pr_debug("Got SPU %d %d\n", spu->number, spu->node);
			/* NOTE(review): channel init runs with spu_lock
			 * held here, unlike spu_alloc_node() which does it
			 * after dropping the lock. */
			spu_init_channels(spu);
			ret = spu;
			break;
		}
	}
	spin_unlock(&spu_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(spu_alloc_spu);

/*
 * Allocate any free SPU from the given NUMA node.
 *
 * Takes the first entry off the node's free_spus list under spu_lock.
 * Returns the allocated SPU with its channels reinitialized, or NULL if
 * the node has no free SPUs.
 */
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	spin_lock(&spu_lock);
	if (!list_empty(&cbe_spu_info[node].free_spus)) {
		spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
				list);
		/* Unlink from the free list: the SPU now belongs to
		 * the caller. */
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	}
	spin_unlock(&spu_lock);

	/* Channel init is deliberately done after dropping spu_lock;
	 * the SPU is already off the free list so no one else can see it. */
	if (spu)
		spu_init_channels(spu);
	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

/*
 * Allocate a free SPU from any NUMA node.
 *
 * Scans the nodes in ascending order and returns the first SPU that
 * spu_alloc_node() hands out, or NULL when every node is exhausted.
 */
struct spu *spu_alloc(void)
{
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		struct spu *spu = spu_alloc_node(node);

		if (spu)
			return spu;
	}

	return NULL;
}

/*
 * Return an SPU to its node's free list.
 *
 * The caller must own the SPU (i.e. it must not already be on the free
 * list); the list insertion is serialized by the global spu_lock.
 */
void spu_free(struct spu *spu)
{
	spin_lock(&spu_lock);
	list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
	spin_unlock(&spu_lock);
}
EXPORT_SYMBOL_GPL(spu_free);
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
Expand Down Expand Up @@ -597,6 +538,8 @@ static int __init create_spu(void *data)
if (!spu)
goto out;

spu->alloc_state = SPU_FREE;

spin_lock_init(&spu->register_lock);
spin_lock(&spu_lock);
spu->number = number++;
Expand All @@ -617,11 +560,10 @@ static int __init create_spu(void *data)
if (ret)
goto out_free_irqs;

spin_lock(&spu_lock);
list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
mutex_lock(&cbe_spu_info[spu->node].list_mutex);
list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
cbe_spu_info[spu->node].n_spus++;
spin_unlock(&spu_lock);
mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

mutex_lock(&spu_full_list_mutex);
spin_lock_irqsave(&spu_full_list_lock, flags);
Expand Down Expand Up @@ -831,8 +773,8 @@ static int __init init_spu_base(void)
int i, ret = 0;

for (i = 0; i < MAX_NUMNODES; i++) {
mutex_init(&cbe_spu_info[i].list_mutex);
INIT_LIST_HEAD(&cbe_spu_info[i].spus);
INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
}

if (!spu_management_ops)
Expand Down
Loading

0 comments on commit 486acd4

Please sign in to comment.