mm/rmap: batched invalidations should use existing api
The recently introduced batched invalidation mechanism implements its
own shootdown.  However, it accounts interrupts incorrectly (e.g.,
inc_irq_stat is called for local invalidations), emits misleading
trace-points (e.g., TLB_REMOTE_SHOOTDOWN for local invalidations), and
may break some platforms, since it bypasses the invalidation mechanisms
of Xen and SGI UV.

This patch reuses the existing TLB flushing mechanisms instead.  We
use NULL as the mm to indicate that a global invalidation is required.

Fixes: 72b252a ("mm: send one IPI per CPU to TLB flush all entries after unmapping pages")
Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Nadav Amit authored and Linus Torvalds committed Apr 1, 2016
1 parent 18c9824 commit 858eaaa
Showing 3 changed files with 8 additions and 28 deletions.
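
The crux of the fix is a convention: passing NULL as the mm to the flush
machinery now means "global invalidation, flush regardless of which mm is
active".  On the receiving end of the IPI this becomes a single extra NULL
test, visible in the arch/x86/mm/tlb.c hunk below.  As a condensed sketch
(the real handler also deals with lazy TLB state and partial-range flushes,
which are elided here; this is not the verbatim 4.5-era code):

	static void flush_tlb_func(void *info)
	{
		struct flush_tlb_info *f = info;

		/* Correct here: this runs in the remote-flush interrupt. */
		inc_irq_stat(irq_tlb_count);

		/*
		 * A NULL flush_mm requests a global invalidation, so bail
		 * out on an mm mismatch only when an mm was supplied.
		 */
		if (f->flush_mm &&
		    f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
			return;

		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
		local_flush_tlb();	/* simplified: always a full flush */
	}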
arch/x86/include/asm/tlbflush.h (0 additions, 6 deletions)
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif /* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() { \
-	inc_irq_stat(irq_tlb_count); \
-	local_flush_tlb(); \
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end) \
 	native_flush_tlb_others(mask, mm, start, end)
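
The context lines above are the point: flush_tlb_others() is the hook that
paravirtualized and SGI UV platforms rely on, which is why open-coding the
shootdown in mm/rmap.c could break them.  A rough sketch of the indirection,
assuming the 4.5-era headers (simplified, not the verbatim definitions):

	/* Bare metal: IPI-based shootdown; SGI UV systems divert to their
	 * Broadcast Assist Unit inside native_flush_tlb_others(). */
	#ifndef CONFIG_PARAVIRT
	#define flush_tlb_others(mask, mm, start, end) \
		native_flush_tlb_others(mask, mm, start, end)
	#else
	/* Paravirt: flush_tlb_others() routes through pv_mmu_ops, letting
	 * Xen replace IPIs with hypercalls; bypassing the macro with a raw
	 * smp_call_function_many() skips this hook entirely. */
	#endif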
arch/x86/mm/tlb.c (1 addition, 1 deletion)
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
mm/rmap.c (7 additions, 21 deletions)
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
-	}
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
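
For context, the producer side of the batch (unchanged by this patch) only
records which CPUs an mm has run on; try_to_unmap_flush() above then issues
one local flush plus one flush_tlb_others() for the whole batch.  A
simplified sketch of that producer, loosely based on the mm/rmap.c of this
era (parameter list trimmed):

	static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
	{
		struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

		/* No per-PFN tracking: the eventual flush is a full-TLB
		 * flush on every recorded CPU. */
		cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
		tlb_ubc->flush_required = true;

		/* A writable PTE must be flushed before the page is queued
		 * for IO, so remember that the batch contains one. */
		if (writable)
			tlb_ubc->writable = true;
	}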
