Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 318431
b: refs/heads/master
c: 611ae8e
h: refs/heads/master
i:
  318429: 4227866
  318427: 01a06ec
  318423: 3d492f7
  318415: 7ef6da7
  318399: cda03b6
v: v3
  • Loading branch information
Alex Shi authored and H. Peter Anvin committed Jun 28, 2012
1 parent 6803f6e commit 266467d
Show file tree
Hide file tree
Showing 4 changed files with 69 additions and 71 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 597e1c3580b7cfd95bb0f3167e2b297bf8a5a3ae
refs/heads/master: 611ae8e3f5204f7480b3b405993b3352cfa16662
9 changes: 8 additions & 1 deletion trunk/arch/x86/include/asm/tlb.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,14 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
 * Flush the TLB for the region gathered by tlb_gather_mmu(): a targeted
 * range flush when only part of the address space was unmapped, a full
 * flush (TLB_FLUSH_ALL) when the whole mm is going away.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * at call sites (safe inside un-braced if/else).
 */
#define tlb_flush(tlb)							\
do {									\
	if ((tlb)->fullmm == 0)						\
		flush_tlb_mm_range((tlb)->mm, (tlb)->start, (tlb)->end, 0UL); \
	else								\
		flush_tlb_mm_range((tlb)->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
} while (0)

#include <asm-generic/tlb.h>

Expand Down
17 changes: 14 additions & 3 deletions trunk/arch/x86/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}

/*
 * UP variant of flush_tlb_mm_range(): range information is irrelevant on a
 * single CPU, so just flush the whole local TLB when @mm is the active mm.
 *
 * Fix: this must take a struct mm_struct *, not a vm_area_struct * — both
 * callers (the flush_tlb_mm() and flush_tlb_range() macros below) pass an
 * mm pointer, as does the SMP prototype of the same name.
 */
static inline void flush_tlb_mm_range(struct mm_struct *mm,
	unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long start,
Expand All @@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end) \
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag);

#define flush_tlb() flush_tlb_current_task()

Expand Down
112 changes: 46 additions & 66 deletions trunk/arch/x86/mm/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
preempt_enable();
}

/*
 * Flush the entire TLB for @mm: flush locally if this CPU is running @mm
 * (or detach from it when in lazy-TLB mode), then ask every other CPU
 * that holds @mm to flush as well.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();	/* stable: preemption is off */

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(cpu);
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Probe [start, end) of @mm for a large page (a transparent huge page,
 * or a HUGETLB page — tlb_flush can encounter those even when THP is
 * disabled).
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
Expand All @@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
}
return 0;
}
#else
/* !CONFIG_TRANSPARENT_HUGEPAGE: no large pages to probe for, report none. */
static inline unsigned long has_large_page(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
return 0;
}
#endif
/*
 * Flush a virtual address range of @mm from the TLB on all CPUs.
 *
 * @mm:     address space to flush
 * @start:  first address of the range
 * @end:    end of the range, or TLB_FLUSH_ALL for the whole address space
 * @vmflag: vm_flags of the mapping; VM_EXEC selects the instruction-TLB
 *          size, VM_HUGETLB forces a full flush
 *
 * When the range is small relative to the TLB size, flush it page by page
 * with 'invlpg'; otherwise a whole-TLB flush is cheaper than evicting many
 * individual entries.
 *
 * NOTE(review): the scraped diff had the removed flush_tlb_range() and the
 * added flush_tlb_mm_range() interleaved; this is the reconstructed added
 * function. One logic fix applied: vm_flags is a bitmask, so the HUGETLB
 * test must be (vmflag & VM_HUGETLB), not vmflag == VM_HUGETLB.
 */
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	if (current->active_mm != mm)
		goto flush_all;

	if (!current->mm) {
		/* Lazy-TLB mode: detach from @mm instead of flushing. */
		leave_mm(smp_processor_id());
		goto flush_all;
	}

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| (vmflag & VM_HUGETLB)) {
		local_flush_tlb();
		goto flush_all;
	}

	/* On modern CPUs the last level TLB is shared by data and instructions */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
	/* Assume all TLB entries are occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is the balance point, details in commit log */
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			/* invlpg on a huge page only drops one 4K entry */
			local_flush_tlb();
			goto flush_all;
		}
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}


void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
struct mm_struct *mm = vma->vm_mm;
Expand Down

0 comments on commit 266467d

Please sign in to comment.