x86/tlb: fall back to flush all when meeting a THP large page
We don't need to flush large pages by PAGE_SIZE steps; that just wastes
time. In fact, according to our micro benchmark, large pages don't benefit
from the 'invlpg' optimization at all, so simply flushing the whole TLB is
enough for them.

The following results were measured on a 2-socket * 4-core * 2-HT Nehalem
(NHM) EP machine, with THP set to 'always'.

Multi-threaded testing; the '-t' parameter is the thread count:
                        without this patch   with this patch
./mprotect -t 1         14ns                 13ns
./mprotect -t 2         13ns                 13ns
./mprotect -t 4         12ns                 11ns
./mprotect -t 8         14ns                 10ns
./mprotect -t 16        28ns                 28ns
./mprotect -t 32        54ns                 52ns
./mprotect -t 128       200ns                200ns
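The mprotect test program itself is not included in the commit; the
following is only a minimal sketch of what such a multi-threaded mprotect
microbenchmark could look like. The region size, iteration count, and the
plain thread-count argument standing in for the original '-t' flag are all
illustrative assumptions:

    /* Hypothetical sketch; not the actual benchmark behind the numbers above. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <time.h>

    #define MAP_LEN (4UL << 20)     /* 4 MB region per thread, THP-eligible */
    #define ITERS   100000

    static void *worker(void *arg)
    {
            char *buf = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return NULL;
            /* Fault the pages in; with THP 'always' the kernel may back
             * this region with huge pages. */
            memset(buf, 1, MAP_LEN);

            for (long i = 0; i < ITERS; i++) {
                    /* Each protection change flushes the TLB over the range. */
                    mprotect(buf, MAP_LEN, PROT_READ);
                    mprotect(buf, MAP_LEN, PROT_READ | PROT_WRITE);
            }
            munmap(buf, MAP_LEN);
            return NULL;
    }

    int main(int argc, char **argv)
    {
            int nthreads = (argc > 1) ? atoi(argv[1]) : 1;
            pthread_t tids[128];
            struct timespec t0, t1;

            if (nthreads < 1 || nthreads > 128)
                    nthreads = 1;

            clock_gettime(CLOCK_MONOTONIC, &t0);
            for (int i = 0; i < nthreads; i++)
                    pthread_create(&tids[i], NULL, worker, NULL);
            for (int i = 0; i < nthreads; i++)
                    pthread_join(tids[i], NULL);
            clock_gettime(CLOCK_MONOTONIC, &t1);

            /* Wall time divided by one thread's call count: a rough
             * per-mprotect average, assuming the threads run concurrently. */
            double ns = (t1.tv_sec - t0.tv_sec) * 1e9
                        + (t1.tv_nsec - t0.tv_nsec);
            printf("%.0f ns per mprotect call\n", ns / (2.0 * ITERS));
            return 0;
    }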

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-4-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Alex Shi authored and H. Peter Anvin committed Jun 28, 2012
1 parent e7b52ff commit d8dfe60
Showing 1 changed file: arch/x86/mm/tlb.c (34 additions, 0 deletions)
@@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)

#define FLUSHALL_BAR 16

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long has_large_page(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = ALIGN(start, HPAGE_SIZE);
        for (; addr < end; addr += HPAGE_SIZE) {
                pgd = pgd_offset(mm, addr);
                if (likely(!pgd_none(*pgd))) {
                        pud = pud_offset(pgd, addr);
                        if (likely(!pud_none(*pud))) {
                                pmd = pmd_offset(pud, addr);
                                if (likely(!pmd_none(*pmd)))
                                        if (pmd_large(*pmd))
                                                return addr;
                        }
                }
        }
        return 0;
}
#else
static inline unsigned long has_large_page(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        return 0;
}
#endif
void flush_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        struct mm_struct *mm;

        if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
flush_all:
                flush_tlb_mm(vma->vm_mm);
                return;
        }
@@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
                        if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
                                local_flush_tlb();
                        else {
                                if (has_large_page(mm, start, end)) {
                                        preempt_enable();
                                        goto flush_all;
                                }
                                for (addr = start; addr < end;
                                                addr += PAGE_SIZE)
                                        __flush_tlb_single(addr);
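To make the FLUSHALL_BAR heuristic concrete (the entry count below is an
illustrative assumption; act_entries is derived from the detected TLB
sizes in code elided from this hunk): with act_entries = 512 and
FLUSHALL_BAR = 16,

        512 entries / 16 = 32 pages, and 32 * 4 KB = 128 KB

so a flush range wider than 128 KB takes the full local_flush_tlb(), while
a narrower range is flushed page by page with __flush_tlb_single(), unless
has_large_page() finds a THP mapping in it; in that case the code
re-enables preemption and jumps to the flush_all path above.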
