From bae70b70841c95a0f4692875065374924e2f774e Mon Sep 17 00:00:00 2001
From: "Julio M. Merino Vidal"
Date: Thu, 20 Dec 2007 16:39:59 +0900
Subject: [PATCH]

--- yaml ---
r: 81139
b: refs/heads/master
c: 9b1d21f858e8bad750ab19cac23dcbf79d099be3
h: refs/heads/master
i:
  81137: d7967e702b6b5dac82e48391162593aa516ed447
  81135: 0ea70169ebdeea297184ae2bb682c85b6aec4194
v: v3
---
 [refs]                                          |  2 +-
 trunk/arch/powerpc/platforms/cell/spufs/sched.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/[refs] b/[refs]
index d6be2d1a21f5..ce77581acf58 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c25620d7663fef41c373d42c4923c1d6b9847684
+refs/heads/master: 9b1d21f858e8bad750ab19cac23dcbf79d099be3
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/sched.c b/trunk/arch/powerpc/platforms/cell/spufs/sched.c
index 4d257b3f9336..0117eb8f6a91 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,15 +105,15 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
-	 * 32-Bit assignment are atomic on powerpc, and we don't care about
-	 * memory ordering here because retriving the controlling thread is
-	 * per defintion racy.
+	 * 32-Bit assignments are atomic on powerpc, and we don't care about
+	 * memory ordering here because retrieving the controlling thread is
+	 * per definition racy.
 	 */
 	ctx->tid = current->pid;

 	/*
 	 * We do our own priority calculations, so we normally want
-	 * ->static_prio to start with. Unfortunately thies field
+	 * ->static_prio to start with. Unfortunately this field
 	 * contains junk for threads with a realtime scheduling
 	 * policy so we have to look at ->prio in this case.
 	 */
@@ -127,7 +127,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * A lot of places that don't hold list_mutex poke into
 	 * cpus_allowed, including grab_runnable_context which
 	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field aswell.
+	 * to protect this field as well.
 	 */
 	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
@@ -182,7 +182,7 @@ static void notify_spus_active(void)
 	 * Wake up the active spu_contexts.
 	 *
 	 * When the awakened processes see their "notify_active" flag is set,
-	 * they will call spu_switch_notify();
+	 * they will call spu_switch_notify().
 	 */
 	for_each_online_node(node) {
 		struct spu *spu;
@@ -579,7 +579,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	/*
 	 * Look for a possible preemption candidate on the local node first.
 	 * If there is no candidate look at the other nodes. This isn't
-	 * exactly fair, but so far the whole spu schedule tries to keep
+	 * exactly fair, but so far the whole spu scheduler tries to keep
 	 * a strong node affinity. We might want to fine-tune this in
 	 * the future.
 	 */
@@ -905,7 +905,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)

 	/*
 	 * Note that last_pid doesn't really make much sense for the
-	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * SPU loadavg (it even seems very odd on the CPU side...),
 	 * but we include it here to have a 100% compatible interface.
 	 */
 	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",