Skip to content

Commit

Permalink
[IA64] Implement smp_call_function_mask for ia64
Browse files Browse the repository at this point in the history
This interface provides more flexible functionality for smp
infrastructure ... e.g. KVM frequently needs to operate on
a subset of cpus.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
  • Loading branch information
Xiantao Zhang authored and Tony Luck committed Apr 3, 2008
1 parent 9665189 commit 31a6b11
Show file tree
Hide file tree
Showing 2 changed files with 85 additions and 0 deletions.
82 changes: 82 additions & 0 deletions arch/ia64/kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
}
}

/*
 * Deliver the IPI denoted by @op to every cpu set in @mask.
 *
 * Must be called with preemption disabled so the mask cannot go
 * stale underneath us while we walk it.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int target;

	for_each_cpu_mask(target, mask)
		send_IPI_single(target, op);
}

/*
* Called with preemption disabled.
*/
Expand Down Expand Up @@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run.  This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function
 *        has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/*
	 * call_lock serializes all users of the single shared call_data
	 * slot; it is held across the whole rendezvous, including the
	 * spin-waits below.
	 */
	spin_lock(&call_lock);
	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	/* Restrict the request to online cpus other than ourselves. */
	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);
	if (!cpus) {
		/* Nothing to do: no eligible target cpus in the mask. */
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(IPI_CALL_FUNC);
	else
		send_IPI_mask(mask, IPI_CALL_FUNC);

	/*
	 * Wait for every targeted cpu to pick up func/info; data.started
	 * is presumably incremented by the IPI handler — not visible in
	 * this file view, confirm against handle_IPI.
	 */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	/* Optionally also wait until func has finished on every target. */
	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	/* Safe to reuse the slot once all targets have at least started. */
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;

}
EXPORT_SYMBOL(smp_call_function_mask);

/*
* this function sends a 'generic call function' IPI to all other CPUs
* in the system.
Expand Down
3 changes: 3 additions & 0 deletions include/asm-ia64/smp.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}

extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
void *info, int wait);

#define hard_smp_processor_id() ia64_get_lid()

#ifdef CONFIG_SMP
Expand Down

0 comments on commit 31a6b11

Please sign in to comment.