x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().

Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe committed Jun 26, 2008
1 parent 3d44223 commit 3b16cf8
Showing 20 changed files with 125 additions and 380 deletions.
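A note on the shape of the change before the per-file hunks: the old x86 code implemented the whole cross-CPU call protocol itself — a global call_data pointer guarded by call_lock, with started/finished counters that the initiating CPU spins on (see the arch/x86/kernel/smp.c hunk below). With USE_GENERIC_SMP_HELPERS selected, kernel/smp.c owns queuing and completion, and the architecture is left with two "send the IPI" hooks plus interrupt handlers that call back into the generic layer. A rough sketch of the split — the native_* function is the one added by this patch, while the generic-side body here is a paraphrase for illustration, not a verbatim copy of kernel/smp.c:

/*
 * Generic side (kernel/smp.c), paraphrased: queue a descriptor on the
 * target CPU, then ask the arch to deliver the actual interrupt.
 */
int smp_call_function_single(int cpu, void (*func)(void *), void *info,
			     int nonatomic, int wait)
{
	/* ... enqueue {func, info} on cpu's call_single_queue ... */
	arch_send_call_function_single_ipi(cpu);
	/* ... if (wait), spin until the target marks the entry done ... */
	return 0;
}

/*
 * Arch side (this patch): sending the IPI is all that remains, reached
 * via the new smp_ops.send_call_func_single_ipi hook.
 */
void native_send_call_func_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}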
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -168,6 +168,7 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
+	select USE_GENERIC_SMP_HELPERS
 	default y
 
 config X86_32_SMP
4 changes: 4 additions & 0 deletions arch/x86/kernel/apic_32.c
@@ -1358,6 +1358,10 @@ void __init smp_intr_init(void)
 
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+	/* IPI for single call function */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+				call_function_single_interrupt);
 }
 #endif
 
3 changes: 3 additions & 0 deletions arch/x86/kernel/entry_64.S
@@ -711,6 +711,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
 	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
4 changes: 4 additions & 0 deletions arch/x86/kernel/i8259_64.c
@@ -494,6 +494,10 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+	/* IPI for generic single function call */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
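Together with the entry_64.S stub above, this completes the usual three-step recipe for bringing up a new IPI vector on this era of x86: reserve a vector number, add a low-level entry stub, and install the stub in the IDT. A condensed sketch — MY_IPI_VECTOR, my_ipi_interrupt, and wire_my_ipi are invented names, and the vector value is illustrative (the real one comes from include/asm-x86/irq_vectors.h):

/* 1. Reserve a vector number (value here is made up). */
#define MY_IPI_VECTOR	0xf5

/*
 * 2. A low-level stub saves registers and funnels the vector into a C
 *    handler; in entry_64.S this is the apicinterrupt pattern:
 *
 *	ENTRY(my_ipi_interrupt)
 *		apicinterrupt MY_IPI_VECTOR,smp_my_ipi_interrupt
 *	END(my_ipi_interrupt)
 */
extern void my_ipi_interrupt(void);

/* 3. Install the stub in the IDT during IRQ setup. */
void __init wire_my_ipi(void)
{
	set_intr_gate(MY_IPI_VECTOR, my_ipi_interrupt);
}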
158 changes: 20 additions & 138 deletions arch/x86/kernel/smp.c
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
 	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-	int nolock;
 	unsigned long flags;
 
 	if (reboot_force)
 		return;
 
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
+	smp_call_function(stop_this_cpu, NULL, 0, 0);
 	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
 	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
 #else
 	add_pda(irq_call_count, 1);
 #endif
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
+	irq_exit();
 }
 
 struct smp_ops smp_ops = {
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = native_smp_send_stop,
 	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
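The two handlers above are the receiving half; callers keep using the unchanged driver-facing API, which now lands in the generic helpers instead of native_smp_call_function_mask(). A minimal hypothetical caller against this tree's signature (the nonatomic argument still existed in mid-2008; flush_state and kick_all_cpus are invented names):

#include <linux/smp.h>
#include <asm/atomic.h>

/* Runs on every other online CPU, in interrupt context: no sleeping. */
static void flush_state(void *info)
{
	atomic_t *acked = info;

	/* ... per-CPU work would go here ... */
	atomic_inc(acked);
}

static void kick_all_cpus(void)
{
	atomic_t acked = ATOMIC_INIT(0);

	/* wait=1: return only after flush_state has finished everywhere */
	smp_call_function(flush_state, &acked, 0, 1);
}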

4 changes: 2 additions & 2 deletions arch/x86/kernel/smpboot.c
@@ -345,7 +345,7 @@ static void __cpuinit start_secondary(void *unused)
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
 	 */
-	lock_ipi_call_lock();
+	ipi_call_lock_irq();
 #ifdef CONFIG_X86_64
 	spin_lock(&vector_lock);
 
@@ -357,7 +357,7 @@ static void __cpuinit start_secondary(void *unused)
 	spin_unlock(&vector_lock);
 #endif
 	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
+	ipi_call_unlock_irq();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
 	setup_secondary_clock();
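ipi_call_lock_irq()/ipi_call_unlock_irq() are provided by the generic helpers and serve the same purpose the open-coded call_lock did: holding the lock keeps this CPU's appearance in cpu_online_map atomic with respect to an in-progress smp_call_function() recipient scan. Roughly, the generic implementations just wrap the helpers' internal lock — a paraphrased sketch, assuming the generic lock is the call-function lock introduced by this series:

/* paraphrased sketch of kernel/smp.c in this series */
static DEFINE_SPINLOCK(call_function_lock);

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}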
56 changes: 0 additions & 56 deletions arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
 	per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
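The wrappers deleted here are not lost: kernel/smp.c now provides smp_call_function() and smp_call_function_single() with the same semantics, including the run-locally-with-interrupts-disabled path when the target is the calling CPU. A small hypothetical use of the single-CPU call (remote_cpu_id and read_cpu_id_on are invented names):

#include <linux/smp.h>

/* Executed on the target CPU; must be fast and non-blocking. */
static void remote_cpu_id(void *info)
{
	int *out = info;

	*out = smp_processor_id();
}

static int read_cpu_id_on(int cpu)
{
	int id = -1;

	/* (cpu, func, info, nonatomic, wait): wait for completion */
	if (smp_call_function_single(cpu, remote_cpu_id, &id, 0, 1))
		return -1;

	return id;
}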