x86: smp_64.c: remove unused exports and cleanup while at it
The exports are nowhere used. There was never any reason to introduce
them in the first place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Thomas Gleixner authored and Ingo Molnar committed Jan 30, 2008
1 parent 081e10b commit 16da2f9
Showing 1 changed file with 35 additions and 39 deletions.
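Background for the change below: EXPORT_SYMBOL() only makes a function visible to loadable modules; built-in code is linked against these functions directly and does not need the export at all. The only code that could ever notice the removal is a module calling one of the flush helpers, along the lines of the hypothetical out-of-tree sketch below (illustration only, not part of this commit), which would simply fail to resolve flush_tlb_mm() at load time once the export is gone.

/* Hypothetical out-of-tree module -- illustration only, not from this tree. */
#include <linux/module.h>
#include <linux/sched.h>	/* current */
#include <asm/tlbflush.h>	/* flush_tlb_mm() */

static int __init flush_demo_init(void)
{
	/* Resolves only while EXPORT_SYMBOL(flush_tlb_mm) exists;
	 * after this commit such a module would fail to load. */
	flush_tlb_mm(current->mm);
	return 0;
}

static void __exit flush_demo_exit(void)
{
}

module_init(flush_demo_init);
module_exit(flush_demo_exit);
MODULE_LICENSE("GPL");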
74 changes: 35 additions & 39 deletions arch/x86/kernel/smp_64.c
@@ -29,23 +29,23 @@
#include <asm/idle.h>

/*
- * Smarter SMP flushing macros.
+ * Smarter SMP flushing macros.
* c/o Linus Torvalds.
*
* These mean you can really definitely utterly forget about
* writing to user space from interrupts. (Its not allowed anyway).
*
* Optimizations Manfred Spraul <manfred@colorfullife.com>
*
- * More scalable flush, from Andi Kleen
+ * More scalable flush, from Andi Kleen
*
- * To avoid global state use 8 different call vectors.
- * Each CPU uses a specific vector to trigger flushes on other
- * CPUs. Depending on the received vector the target CPUs look into
+ * To avoid global state use 8 different call vectors.
+ * Each CPU uses a specific vector to trigger flushes on other
+ * CPUs. Depending on the received vector the target CPUs look into
* the right per cpu variable for the flush data.
*
- * With more than 8 CPUs they are hashed to the 8 available
- * vectors. The limited global vector space forces us to this right now.
+ * With more than 8 CPUs they are hashed to the 8 available
+ * vectors. The limited global vector space forces us to this right now.
* In future when interrupts are split into per CPU domains this could be
* fixed, at the cost of triggering multiple IPIs in some cases.
*/
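The hashing mentioned in the comment above boils down to a modulo: each sending CPU picks one of the 8 invalidate vectors by its id, so with more than 8 CPUs several senders share a slot. A minimal user-space sketch (not kernel code) of that mapping, reusing the NUM_INVALIDATE_TLB_VECTORS name that appears in the flush_tlb_others() hunk further down:

/* Stand-alone illustration of hashing sender CPUs onto 8 flush vectors. */
#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS 8	/* mirrors the kernel constant */

int main(void)
{
	/* CPUs 0-7 get their own slot; CPU 8 shares slot 0, CPU 9 slot 1, ... */
	for (int cpu = 0; cpu < 16; cpu++)
		printf("cpu %2d -> invalidate vector slot %d\n",
		       cpu, cpu % NUM_INVALIDATE_TLB_VECTORS);
	return 0;
}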
@@ -67,7 +67,7 @@ union smp_flush_state {
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
- * We cannot call mmdrop() because we are in interrupt context,
+ * We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask.
*/
static inline void leave_mm(int cpu)
@@ -85,25 +85,25 @@ static inline void leave_mm(int cpu)
* 1) switch_mm() either 1a) or 1b)
* 1a) thread switch to a different mm
* 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- * Stop ipi delivery for the old mm. This is not synchronized with
- * the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * for the wrong mm, and in the worst case we perform a superfluous
- * tlb flush.
+ * Stop ipi delivery for the old mm. This is not synchronized with
+ * the other cpus, but smp_invalidate_interrupt ignore flush ipis
+ * for the wrong mm, and in the worst case we perform a superfluous
+ * tlb flush.
* 1a2) set cpu mmu_state to TLBSTATE_OK
- * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
* was in lazy tlb mode.
* 1a3) update cpu active_mm
- * Now cpu0 accepts tlb flushes for the new mm.
+ * Now cpu0 accepts tlb flushes for the new mm.
* 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- * Now the other cpus will send tlb flush ipis.
+ * Now the other cpus will send tlb flush ipis.
* 1a4) change cr3.
* 1b) thread switch without mm change
* cpu active_mm is correct, cpu0 already handles
* flush ipis.
* 1b1) set cpu mmu_state to TLBSTATE_OK
* 1b2) test_and_set the cpu bit in cpu_vm_mask.
- * Atomically set the bit [other cpus will start sending flush ipis],
- * and test the bit.
+ * Atomically set the bit [other cpus will start sending flush ipis],
+ * and test the bit.
* 1b3) if the bit was 0: leave_mm was called, flush the tlb.
* 2) switch %%esp, ie current
*
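Steps 1b2/1b3 above are the subtle part of the protocol: the CPU bit in cpu_vm_mask is set and tested in a single atomic operation, so if the bit turns out to have been clear, the CPU knows leave_mm() ran in the meantime and it must flush its own TLB. A minimal user-space sketch of that pattern (C11 atomics standing in for the kernel's atomic test-and-set; illustration only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cpu_vm_mask;	/* stand-in for mm->cpu_vm_mask */

static void flush_local_tlb(int cpu)
{
	printf("cpu%d: bit was clear -> leave_mm() ran, flush local TLB\n", cpu);
}

/* Step 1b2/1b3: atomically set this CPU's bit and act on the old value. */
static void switch_to_same_mm(int cpu)
{
	unsigned long bit = 1UL << cpu;
	unsigned long old = atomic_fetch_or(&cpu_vm_mask, bit);

	if (!(old & bit))	/* bit was 0: a flush ipi may have been missed */
		flush_local_tlb(cpu);
}

int main(void)
{
	switch_to_same_mm(3);	/* first switch: bit clear, needs a flush  */
	switch_to_same_mm(3);	/* later switch: bit already set, no flush */
	return 0;
}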
@@ -142,15 +142,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)

if (!cpu_isset(cpu, f->flush_cpumask))
goto out;
- /*
+ /*
* This was a BUG() but until someone can quote me the
* line from the intel manual that guarantees an IPI to
* multiple CPUs is retried _only_ on the erroring CPUs
* its staying as a return
*
* BUG();
*/

if (f->flush_mm == read_pda(active_mm)) {
if (read_pda(mmu_state) == TLBSTATE_OK) {
if (f->flush_va == FLUSH_ALL)
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
f = &per_cpu(flush_state, sender);

- /* Could avoid this lock when
- num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
- probably not worth checking this for a cache-hot lock. */
+ /*
+ * Could avoid this lock when
+ * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+ * probably not worth checking this for a cache-hot lock.
+ */
spin_lock(&f->tlbstate_lock);

f->flush_mm = mm;
@@ -202,14 +204,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
int __cpuinit init_smp_flush(void)
{
int i;

for_each_cpu_mask(i, cpu_possible_map) {
spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
}
return 0;
}

core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
- EXPORT_SYMBOL(flush_tlb_current_task);

void flush_tlb_mm (struct mm_struct * mm)
{
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)

preempt_enable();
}
- EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@ -259,16 +259,15 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
if (current->active_mm == mm) {
if(current->mm)
__flush_tlb_one(va);
- else
- leave_mm(smp_processor_id());
+ else
+ leave_mm(smp_processor_id());
}

if (!cpus_empty(cpu_mask))
flush_tlb_others(cpu_mask, mm, va);

preempt_enable();
}
- EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void* info)
{
@@ -325,11 +324,9 @@ void unlock_ipi_call_lock(void)
* this function sends a 'generic call function' IPI to all other CPU
* of the system defined in the mask.
*/

- static int
- __smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+ static int __smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
cpumask_t allbutself;
@@ -417,11 +414,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
*/

int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
+ int nonatomic, int wait)
{
/* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
+ int ret, me = get_cpu();

/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -471,9 +467,9 @@ static void stop_this_cpu(void *dummy)
*/
cpu_clear(smp_processor_id(), cpu_online_map);
disable_local_APIC();
- for (;;)
+ for (;;)
halt();
- }
+ }

void smp_send_stop(void)
{