From fa7c5d6aaae82e73e5b9117d415a57e22972e734 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 5 Aug 2011 08:27:49 -0400
Subject: [PATCH]

--- yaml ---
r: 269141
b: refs/heads/master
c: d473750b4073f16f23f46f30dc1bd3de45c35754
h: refs/heads/master
i:
  269139: eaf877095dcd3d49ac8fff8ac23a8efd56b5f96a
v: v3
---
 [refs]                      |  2 +-
 trunk/kernel/sched_cpupri.c | 28 +++++++++++++++++++++++++---
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/[refs] b/[refs]
index a11bdba6bb8c..38622f64b62b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c92211d9b772792a9dea530c042efb4ab5562f50
+refs/heads/master: d473750b4073f16f23f46f30dc1bd3de45c35754
diff --git a/trunk/kernel/sched_cpupri.c b/trunk/kernel/sched_cpupri.c
index 7761a2669fff..90faffdbdf98 100644
--- a/trunk/kernel/sched_cpupri.c
+++ b/trunk/kernel/sched_cpupri.c
@@ -73,9 +73,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
+		int skip = 0;
 
 		if (!atomic_read(&(vec)->count))
-			continue;
+			skip = 1;
 		/*
 		 * When looking at the vector, we need to read the counter,
 		 * do a memory barrier, then read the mask.
@@ -96,6 +97,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		 */
 		smp_rmb();
 
+		/* Need to do the rmb for every iteration */
+		if (skip)
+			continue;
+
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
@@ -134,6 +139,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
 	int                 *currpri = &cp->cpu_to_pri[cpu];
 	int                  oldpri  = *currpri;
+	int                  do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
@@ -158,18 +164,34 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * do a write memory barrier, and then update the count, to
 		 * make sure the vector is visible when count is set.
 		 */
-		smp_wmb();
+		smp_mb__before_atomic_inc();
 		atomic_inc(&(vec)->count);
+		do_mb = 1;
 	}
 	if (likely(oldpri != CPUPRI_INVALID)) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];
 
+		/*
+		 * Because the order of modification of the vec->count
+		 * is important, we must make sure that the update
+		 * of the new prio is seen before we decrement the
+		 * old prio. This makes sure that the loop sees
+		 * one or the other when we raise the priority of
+		 * the run queue. We don't care about when we lower the
+		 * priority, as that will trigger an rt pull anyway.
+		 *
+		 * We only need to do a memory barrier if we updated
+		 * the new priority vec.
+		 */
+		if (do_mb)
+			smp_mb__after_atomic_inc();
+
		/*
		 * When removing from the vector, we decrement the counter first
		 * do a memory barrier and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
-		smp_wmb();
+		smp_mb__after_atomic_inc();
		cpumask_clear_cpu(cpu, vec->mask);
	}
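
Illustrative note, not part of the patch above: the hunks pair a writer-side
full barrier in cpupri_set(), which makes the new vector visible before the
old one is emptied, with a reader-side smp_rmb() in cpupri_find() that must
run on every loop iteration, even for vectors that end up skipped. Below is a
minimal user-space sketch of that ordering pattern using C11 atomics in place
of the kernel primitives; struct vec, NR_PRIO, set_prio() and find_prio() are
invented for this example and are not the kernel's types or API.

/*
 * Minimal user-space sketch of the ordering pattern above, using C11
 * atomics instead of the kernel's smp_mb__{before,after}_atomic_inc()
 * and smp_rmb().  NR_PRIO, struct vec, set_prio() and find_prio() are
 * invented for this example; they are not the kernel's types or API.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_PRIO 2

struct vec {
	atomic_int  count;	/* number of CPUs currently at this prio */
	atomic_uint mask;	/* bitmask of those CPUs                 */
};

static struct vec pri_to_cpu[NR_PRIO];

/* Writer (cpupri_set analogue): move @cpu from @oldpri to @newpri. */
static void set_prio(int cpu, int oldpri, int newpri)
{
	struct vec *newv = &pri_to_cpu[newpri];
	struct vec *oldv = &pri_to_cpu[oldpri];

	/* Publish the mask bit before the count that advertises it. */
	atomic_fetch_or(&newv->mask, 1u << cpu);
	atomic_fetch_add(&newv->count, 1);	/* seq_cst RMW */

	/*
	 * The seq_cst read-modify-writes keep the increment of the new
	 * count globally ordered before the decrement of the old one,
	 * so a reader always sees at least one vector as populated.
	 */
	atomic_fetch_sub(&oldv->count, 1);
	atomic_fetch_and(&oldv->mask, ~(1u << cpu));
}

/* Reader (cpupri_find analogue): return a prio whose mask has a CPU. */
static int find_prio(void)
{
	for (int idx = 0; idx < NR_PRIO; idx++) {
		struct vec *v = &pri_to_cpu[idx];
		int skip = (atomic_load(&v->count) == 0);

		/*
		 * smp_rmb() analogue: order the count read before the
		 * mask read, on every iteration, even when skipping.
		 */
		atomic_thread_fence(memory_order_acquire);
		if (skip)
			continue;

		if (atomic_load(&v->mask))
			return idx;
	}
	return -1;
}

int main(void)
{
	/* CPU 0 starts at prio 0, then is raised to prio 1. */
	atomic_fetch_or(&pri_to_cpu[0].mask, 1u);
	atomic_fetch_add(&pri_to_cpu[0].count, 1);
	set_prio(0, 0, 1);

	printf("found prio %d\n", find_prio());	/* prints "found prio 1" */
	return 0;
}

The reason the patch introduces the skip flag rather than continuing early is
visible in find_prio() above: the barrier still executes for vectors whose
count reads as zero, so the reader's count and mask reads stay ordered against
the writer's update sequence and, per the comment added by the patch, the loop
sees one vector or the other when a run queue's priority is raised.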