---
r: 8599
b: refs/heads/master
c: e5bc8b6
h: refs/heads/master
i:
  8597: 8ebfae1
  8595: a1cd129
  8591: bf71768
v: v3
Andi Kleen authored and Linus Torvalds committed Sep 12, 2005
1 parent 45c7603 commit b6a9bc4
Showing 5 changed files with 118 additions and 59 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 83b942bd3437d84d6ddf582477120b6b86369052
+refs/heads/master: e5bc8b6baf86538f3d0407cf0880f86aec828a13
15 changes: 13 additions & 2 deletions trunk/arch/x86_64/kernel/entry.S
@@ -536,8 +536,19 @@ ENTRY(thermal_interrupt)
 ENTRY(reschedule_interrupt)
 	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
 
-ENTRY(invalidate_interrupt)
-	apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
+	.macro INVALIDATE_ENTRY num
+ENTRY(invalidate_interrupt\num)
+	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
+	.endm
+
+	INVALIDATE_ENTRY 0
+	INVALIDATE_ENTRY 1
+	INVALIDATE_ENTRY 2
+	INVALIDATE_ENTRY 3
+	INVALIDATE_ENTRY 4
+	INVALIDATE_ENTRY 5
+	INVALIDATE_ENTRY 6
+	INVALIDATE_ENTRY 7
 
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
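The .macro/.endm block above stamps out eight entry stubs, invalidate_interrupt0 through invalidate_interrupt7, one per vector starting at INVALIDATE_TLB_VECTOR_START. A standalone C sketch of the arithmetic tying a stub's number to its vector, and back again via the orig_rax encoding used later in smp.c; illustrative only, with just the #defined constant taken from the commit:

#include <assert.h>

#define INVALIDATE_TLB_VECTOR_START 0xf0	/* from hw_irq.h below */

int main(void)
{
	int num, vector;
	long orig_rax, sender;

	for (num = 0; num < 8; num++) {
		/* stub "invalidate_interrupt<num>" is wired to this vector */
		vector = INVALIDATE_TLB_VECTOR_START + num;

		/* per the commit's comment, orig_rax holds vector - 256 ... */
		orig_rax = vector - 256;

		/* ... and smp_invalidate_interrupt inverts that to find
		   which per-CPU flush_state slot the sender used */
		sender = orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
		assert(sender == num);
	}
	return 0;
}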
20 changes: 17 additions & 3 deletions trunk/arch/x86_64/kernel/i8259.c
@@ -486,7 +486,14 @@ void spurious_interrupt(void);
 void error_interrupt(void);
 void reschedule_interrupt(void);
 void call_function_interrupt(void);
-void invalidate_interrupt(void);
+void invalidate_interrupt0(void);
+void invalidate_interrupt1(void);
+void invalidate_interrupt2(void);
+void invalidate_interrupt3(void);
+void invalidate_interrupt4(void);
+void invalidate_interrupt5(void);
+void invalidate_interrupt6(void);
+void invalidate_interrupt7(void);
 void thermal_interrupt(void);
 void i8254_timer_resume(void);
 
@@ -562,8 +569,15 @@ void __init init_IRQ(void)
 	 */
 	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	/* IPIs for invalidation */
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+	set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
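Each generated stub has its own symbol, which is why init_IRQ registers the eight gates with eight explicit calls; the mapping itself is simply "vector START+i gets stub i". A toy userspace sketch of that registration pattern — set_intr_gate, the idt array, and the stub bodies here are stand-ins, not the kernel API:

#include <assert.h>

#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS  8

typedef void (*handler_t)(void);

static handler_t idt[256];	/* toy IDT: one slot per vector */

static void set_intr_gate(int vector, handler_t h)	/* stand-in */
{
	idt[vector] = h;
}

/* stand-ins for the eight generated entry stubs */
#define STUB(n) static void invalidate_interrupt##n(void) {}
STUB(0) STUB(1) STUB(2) STUB(3) STUB(4) STUB(5) STUB(6) STUB(7)
#undef STUB

int main(void)
{
	handler_t stubs[NUM_INVALIDATE_TLB_VECTORS] = {
		invalidate_interrupt0, invalidate_interrupt1,
		invalidate_interrupt2, invalidate_interrupt3,
		invalidate_interrupt4, invalidate_interrupt5,
		invalidate_interrupt6, invalidate_interrupt7,
	};
	int i;

	for (i = 0; i < NUM_INVALIDATE_TLB_VECTORS; i++)
		set_intr_gate(INVALIDATE_TLB_VECTOR_START + i, stubs[i]);

	assert(idt[0xf3] == invalidate_interrupt3);
	return 0;
}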
123 changes: 78 additions & 45 deletions trunk/arch/x86_64/kernel/smp.c
@@ -29,6 +29,8 @@
 #include <asm/proto.h>
 #include <asm/apicdef.h>
 
+#define __cpuinit __init
+
 /*
  * Smarter SMP flushing macros.
  * c/o Linus Torvalds.
@@ -37,19 +39,41 @@
  * writing to user space from interrupts. (Its not allowed anyway).
  *
  * Optimizations Manfred Spraul <manfred@colorfullife.com>
+ *
+ * More scalable flush, from Andi Kleen
+ *
+ * To avoid global state use 8 different call vectors.
+ * Each CPU uses a specific vector to trigger flushes on other
+ * CPUs. Depending on the received vector the target CPUs look into
+ * the right per cpu variable for the flush data.
+ *
+ * With more than 8 CPUs they are hashed to the 8 available
+ * vectors. The limited global vector space forces us to this right now.
+ * In future when interrupts are split into per CPU domains this could be
+ * fixed, at the cost of triggering multiple IPIs in some cases.
  */
 
-static cpumask_t flush_cpumask;
-static struct mm_struct * flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
+union smp_flush_state {
+	struct {
+		cpumask_t flush_cpumask;
+		struct mm_struct *flush_mm;
+		unsigned long flush_va;
 #define FLUSH_ALL	-1ULL
+		spinlock_t tlbstate_lock;
+	};
+	char pad[SMP_CACHE_BYTES];
+} ____cacheline_aligned;
+
+/* State is put into the per CPU data section, but padded
+   to a full cache line because other CPUs can access it and we don't
+   want false sharing in the per cpu data segment. */
+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
  */
-static inline void leave_mm (unsigned long cpu)
+static inline void leave_mm(int cpu)
 {
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		BUG();
@@ -101,15 +125,25 @@ static inline void leave_mm (unsigned long cpu)
  *
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
  */
 
-asmlinkage void smp_invalidate_interrupt (void)
+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	unsigned long cpu;
+	int cpu;
+	int sender;
+	union smp_flush_state *f;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
+	/*
+	 * orig_rax contains the interrupt vector - 256.
+	 * Use that to determine where the sender put the data.
+	 */
+	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+	f = &per_cpu(flush_state, sender);
 
-	if (!cpu_isset(cpu, flush_cpumask))
+	if (!cpu_isset(cpu, f->flush_cpumask))
 		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
@@ -120,64 +154,63 @@ asmlinkage void smp_invalidate_interrupt (void)
 	 * BUG();
 	 */
 
-	if (flush_mm == read_pda(active_mm)) {
+	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (f->flush_va == FLUSH_ALL)
 				local_flush_tlb();
 			else
-				__flush_tlb_one(flush_va);
+				__flush_tlb_one(f->flush_va);
 		} else
 			leave_mm(cpu);
 	}
 out:
 	ack_APIC_irq();
-	cpu_clear(cpu, flush_cpumask);
-	put_cpu_no_resched();
+	cpu_clear(cpu, f->flush_cpumask);
 }
 
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 						unsigned long va)
 {
-	cpumask_t tmp;
-	/*
-	 * A couple of (to be removed) sanity checks:
-	 *
-	 * - we do not send IPIs to not-yet booted CPUs.
-	 * - current CPU must not be in mask
-	 * - mask must exist :)
-	 */
-	BUG_ON(cpus_empty(cpumask));
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(tmp, cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-	if (!mm)
-		BUG();
+	int sender;
+	union smp_flush_state *f;
 
-	/*
-	 * I'm not happy about this global shared spinlock in the
-	 * MM hot path, but we'll see how contended it is.
-	 * Temporarily this turns IRQs off, so that lockups are
-	 * detected by the NMI watchdog.
-	 */
-	spin_lock(&tlbstate_lock);
-
-	flush_mm = mm;
-	flush_va = va;
-	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+	/* Caller has disabled preemption */
+	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+	f = &per_cpu(flush_state, sender);
+
+	/* Could avoid this lock when
+	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+	   probably not worth checking this for a cache-hot lock. */
+	spin_lock(&f->tlbstate_lock);
+
+	f->flush_mm = mm;
+	f->flush_va = va;
+	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpus_empty(flush_cpumask))
-		mb();	/* nothing. lockup detection does not belong here */;
+	while (!cpus_empty(f->flush_cpumask))
+		cpu_relax();
 
-	flush_mm = NULL;
-	flush_va = 0;
-	spin_unlock(&tlbstate_lock);
+	f->flush_mm = NULL;
+	f->flush_va = 0;
+	spin_unlock(&f->tlbstate_lock);
 }
 
+int __cpuinit init_smp_flush(void)
+{
+	int i;
+	for_each_cpu_mask(i, cpu_possible_map) {
+		spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
+	}
+	return 0;
+}
+
+core_initcall(init_smp_flush);
+
 void flush_tlb_current_task(void)
 {
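Two details of the rewritten smp.c are worth unpacking. Senders hash onto the eight flush_state slots with smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS, so with more than eight CPUs several senders share a slot (and that slot's tlbstate_lock). And each slot is padded to SMP_CACHE_BYTES so concurrent senders on different slots never false-share a cache line. A standalone sketch; the 64-byte cache line, the simplified field types, and the CPU numbers are example values, not taken from the commit:

#include <assert.h>
#include <string.h>

#define NUM_INVALIDATE_TLB_VECTORS 8
#define SMP_CACHE_BYTES 64	/* example cache line size, not from the commit */

/* shape of the per-slot state, loosely mirroring union smp_flush_state */
union flush_state {
	struct {
		unsigned long flush_cpumask;	/* cpumask_t stand-in */
		void *flush_mm;
		unsigned long flush_va;
	};
	char pad[SMP_CACHE_BYTES];
};

int main(void)
{
	/* more CPUs than vectors: CPUs 3 and 11 share sender slot 3 */
	assert(3  % NUM_INVALIDATE_TLB_VECTORS == 3);
	assert(11 % NUM_INVALIDATE_TLB_VECTORS == 3);

	/* padding keeps adjacent slots on different cache lines */
	union flush_state slots[NUM_INVALIDATE_TLB_VECTORS];
	memset(slots, 0, sizeof(slots));
	assert(sizeof(slots[0]) == SMP_CACHE_BYTES);
	return 0;
}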
17 changes: 9 additions & 8 deletions trunk/include/asm-x86_64/hw_irq.h
@@ -50,14 +50,15 @@ struct hw_interrupt_type;
  */
 #define SPURIOUS_APIC_VECTOR	0xff
 #define ERROR_APIC_VECTOR	0xfe
-#define INVALIDATE_TLB_VECTOR	0xfd
-#define RESCHEDULE_VECTOR	0xfc
-#define TASK_MIGRATION_VECTOR	0xfb
-#define CALL_FUNCTION_VECTOR	0xfa
-#define KDB_VECTOR	0xf9
-
-#define THERMAL_APIC_VECTOR	0xf0
-
+#define RESCHEDULE_VECTOR	0xfd
+#define CALL_FUNCTION_VECTOR	0xfc
+#define KDB_VECTOR	0xfb	/* reserved for KDB */
+#define THERMAL_APIC_VECTOR	0xfa
+/* 0xf9 free */
+#define INVALIDATE_TLB_VECTOR_END	0xf8
+#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f8 used for TLB flush */
+
+#define NUM_INVALIDATE_TLB_VECTORS	8
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,
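A quick consistency check of the reshuffled vector map, with the values copied from the diff above: the eight generated stubs occupy 0xf0 through 0xf7 inside the reserved 0xf0-0xf8 window, below the fixed high vectors, with 0xf9 left free.

#include <assert.h>

#define THERMAL_APIC_VECTOR		0xfa
#define INVALIDATE_TLB_VECTOR_END	0xf8
#define INVALIDATE_TLB_VECTOR_START	0xf0
#define NUM_INVALIDATE_TLB_VECTORS	8

int main(void)
{
	/* the eight stubs use vectors 0xf0 through 0xf7 ... */
	int last = INVALIDATE_TLB_VECTOR_START + NUM_INVALIDATE_TLB_VECTORS - 1;
	assert(last == 0xf7);
	/* ... which stays inside the reserved f0-f8 window ... */
	assert(last <= INVALIDATE_TLB_VECTOR_END);
	/* ... and below the fixed vectors, with 0xf9 left free */
	assert(INVALIDATE_TLB_VECTOR_END < THERMAL_APIC_VECTOR - 1);
	return 0;
}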
