x86: change x86_64 smp_call_function_mask to look like i386
The two versions of smp_call_function_mask (the inner version, and the outer version that takes the locks) are merged into one. With this change, the i386 and x86_64 versions look exactly the same.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Glauber Costa authored and Ingo Molnar committed Apr 17, 2008
Parent: 3a36d1e · Commit: 2513926
Showing 2 changed files with 17 additions and 42 deletions.
arch/x86/kernel/smp_32.c (1 addition, 1 deletion)

@@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask,
         atomic_set(&data.finished, 0);
 
         call_data = &data;
-        mb();
+        wmb();
 
         /* Send a message to other CPUs */
         if (cpus_equal(mask, allbutself))
arch/x86/kernel/smp_64.c (16 additions, 41 deletions)

@@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info,
 }
 
 
-/*
- * this function sends a 'generic call function' IPI to all other CPU
- * of the system defined in the mask.
- */
-static int __smp_call_function_mask(cpumask_t mask,
-                                    void (*func)(void *), void *info,
-                                    int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+                                  void (*func)(void *), void *info,
+                                  int wait)
 {
         struct call_data_struct data;
         cpumask_t allbutself;
         int cpus;
 
+        /* Can deadlock when called with interrupts disabled */
+        WARN_ON(irqs_disabled());
+
+        /* Holding any lock stops cpus from going down. */
+        spin_lock(&call_lock);
+
         allbutself = cpu_online_map;
         cpu_clear(smp_processor_id(), allbutself);
 
         cpus_and(mask, mask, allbutself);
         cpus = cpus_weight(mask);
 
-        if (!cpus)
+        if (!cpus) {
+                spin_unlock(&call_lock);
                 return 0;
+        }
 
         data.func = func;
         data.info = info;
@@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask,
         while (atomic_read(&data.started) != cpus)
                 cpu_relax();
 
-        if (!wait)
-                return 0;
+        if (wait)
+                while (atomic_read(&data.finished) != cpus)
+                        cpu_relax();
 
-        while (atomic_read(&data.finished) != cpus)
-                cpu_relax();
+        spin_unlock(&call_lock);
 
         return 0;
 }
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int native_smp_call_function_mask(cpumask_t mask,
-                                  void (*func)(void *), void *info,
-                                  int wait)
-{
-        int ret;
-
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        spin_lock(&call_lock);
-        ret = __smp_call_function_mask(mask, func, info, wait);
-        spin_unlock(&call_lock);
-        return ret;
-}
 
 static void stop_this_cpu(void *dummy)
 {
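For orientation, here is a sketch of the merged x86_64 native_smp_call_function_mask that results from the two hunks above. It is reconstructed only from the context shown in this diff, not copied from the tree: the data setup and IPI send between the two hunks are collapsed in the view above, so they appear below only as a placeholder comment, and indentation is approximate.

/*
 * Sketch of the post-commit function, assembled from the hunks above.
 * Not a verbatim copy of arch/x86/kernel/smp_64.c.
 */
int native_smp_call_function_mask(cpumask_t mask,
                                  void (*func)(void *), void *info,
                                  int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /*
         * ... fill in call_data and send the IPI to the cpus in mask
         * (this part sits between the two hunks and is collapsed in the
         * diff view above, so it is not reproduced here) ...
         */

        /* Wait until every targeted cpu has picked up the call ... */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        /* ... and, if the caller asked for it, until they have all finished. */
        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();

        spin_unlock(&call_lock);

        return 0;
}

After the mb()-to-wmb() change in smp_32.c, this has the same shape as the i386 version, which is the point of the commit.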
