Commit 1057747
---
r: 176474
b: refs/heads/master
c: 9f5a562
h: refs/heads/master
v: v3
Thomas Gleixner committed Dec 14, 2009
1 parent 2966c6a commit 1057747
Showing 2 changed files with 17 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d209d74d52ab39dc071656533cac095294f70de7
+refs/heads/master: 9f5a5621e78cf48d86682a71ceb3fcdbde38b222
32 changes: 16 additions & 16 deletions trunk/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head queue;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue = LIST_HEAD_INIT(call_function.queue),
-		.lock  = __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock  = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head list;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
@@ -449,14 +449,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -501,20 +501,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }
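
The whole diff applies one mechanical pattern: every spinlock_t protecting these IPI queues becomes a raw_spinlock_t, and each spin_*() call becomes the matching raw_spin_*() call. On a stock kernel the two behave identically; the distinction matters under PREEMPT_RT, where spinlock_t may be substituted by a sleeping lock while raw_spinlock_t always remains a true spinning lock, which these low-level IPI paths require. Below is a minimal sketch of the pattern using a hypothetical demo_queue rather than the real kernel/smp.c structures; it is kernel-style C and only builds in a kernel tree:

	/* Hypothetical example of the conversion pattern, not from this commit. */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct demo_queue {
		struct list_head list;
		raw_spinlock_t lock;		/* was: spinlock_t lock; */
	};

	static struct demo_queue demo = {
		.list = LIST_HEAD_INIT(demo.list),
		/* was: __SPIN_LOCK_UNLOCKED(demo.lock) */
		.lock = __RAW_SPIN_LOCK_UNLOCKED(demo.lock),
	};

	static void demo_enqueue(struct list_head *entry)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave(&demo.lock, flags); */
		raw_spin_lock_irqsave(&demo.lock, flags);
		list_add_tail(entry, &demo.list);
		raw_spin_unlock_irqrestore(&demo.lock, flags);
	}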
