Commit 20a0005

r: 362676
b: refs/heads/master
c: f36391d
h: refs/heads/master
v: v3
David S. Miller committed Apr 19, 2013
1 parent 3fae68d commit 20a0005
Showing 8 changed files with 242 additions and 56 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cbf1ef6b3345d2cc7e62407eec6a6f72a8b1346f
+refs/heads/master: f36391d2790d04993f48da6a45810033a2cdf847
1 change: 1 addition & 0 deletions trunk/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
3 changes: 1 addition & 2 deletions trunk/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do {						\
  * and 2 stores in this critical code path.  -DaveM
  */
 #define switch_to(prev, next, last)					\
-do {	flush_tlb_pending();						\
-	save_and_clear_fpu();						\
+do {	save_and_clear_fpu();						\
 	/* If you are tempted to conditionalize the following */	\
 	/* so that ASI is only written if it changes, think again. */	\
 	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
37 changes: 31 additions & 6 deletions trunk/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
 struct tlb_batch {
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
+	unsigned long active;
 	unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end) \
-	do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)	flush_tlb_pending()
-#define flush_tlb_mm(mm)		flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
 
 /* Local cpu only. */
 extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {	flush_tsb_kernel_range(start,end); \
 	__flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {	flush_tsb_kernel_range(start,end); \
 	smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+	smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
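The structural point of this header change is that flush_tlb_mm/flush_tlb_page/flush_tlb_range become no-ops (flushing is driven through the batch and the new lazy-MMU hooks instead), while a new single-page primitive, global_flush_tlb_page(), flushes the local TLB directly on UP builds and goes through a cross-call wrapper on SMP. Below is a minimal userspace C model of that UP/SMP dispatch, not kernel code: the flush bodies are stubs and the CTX_HWBITS mask value is illustrative.

```c
/* Userspace sketch of the global_flush_tlb_page() dispatch above.
 * Build without -DCONFIG_SMP to take the UP branch; the SMP branch
 * only declares the wrapper defined elsewhere in the real kernel. */
#include <stdio.h>

struct mm_struct { unsigned long context; };

#define CTX_HWBITS(ctx) ((ctx) & 0x1fffUL)	/* illustrative mask, not the kernel's */

static void __flush_tlb_page(unsigned long context, unsigned long vaddr)
{
	printf("local flush: ctx=%#lx vaddr=%#lx\n", context, vaddr);
}

#ifndef CONFIG_SMP
/* UP: flush the local TLB directly, as the new inline does. */
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}
#else
/* SMP: defer to the cross-call wrapper added in smp_64.c. */
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
#define global_flush_tlb_page(mm, vaddr) smp_flush_tlb_page(mm, vaddr)
#endif

int main(void)
{
	struct mm_struct mm = { .context = 0x42 };

	global_flush_tlb_page(&mm, 0x7f0000000000UL);
	return 0;
}
```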
41 changes: 37 additions & 4 deletions trunk/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	put_cpu();
 }
 
+struct tlb_pending_info {
+	unsigned long ctx;
+	unsigned long nr;
+	unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+	struct tlb_pending_info *t = info;
+
+	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
 	u32 ctx = CTX_HWBITS(mm->context);
+	struct tlb_pending_info info;
 	int cpu = get_cpu();
 
+	info.ctx = ctx;
+	info.nr = nr;
+	info.vaddrs = vaddrs;
+
 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
-		smp_cross_call_masked(&xcall_flush_tlb_pending,
-				      ctx, nr, (unsigned long) vaddrs,
-				      mm_cpumask(mm));
+		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+				       &info, 1);
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long context = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_page,
+				      context, vaddr, 0,
+				      mm_cpumask(mm));
+	__flush_tlb_page(context, vaddr);
+
+	put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	start &= PAGE_MASK;
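On the smp_64.c side, the hand-rolled xcall_flush_tlb_pending cross-call is replaced with the generic smp_call_function_many(), marshalling context, count, and the vaddr array through an on-stack tlb_pending_info. Because the final argument (wait = 1) makes the caller block until every target CPU has run the callback, the on-stack struct and the vaddr list it points at stay valid for the whole operation. A small userspace sketch of that marshalling pattern follows; the synchronous stand-in for the cross-call is of course hypothetical.

```c
/* Userspace model of the smp_call_function_many() pattern above:
 * bundle the arguments into a struct whose lifetime covers the call,
 * since the callback runs on other CPUs. Not kernel code. */
#include <stdio.h>

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

/* Stub for the local flush primitive. */
static void __flush_tlb_pending(unsigned long ctx, unsigned long nr,
				unsigned long *vaddrs)
{
	for (unsigned long i = 0; i < nr; i++)
		printf("flush ctx=%#lx vaddr=%#lx\n", ctx, vaddrs[i]);
}

/* Mirrors tlb_pending_func(): unpack the info and run the flush. */
static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

/* Stand-in for smp_call_function_many(..., wait=1): runs the callback
 * synchronously; in the kernel the wait flag keeps `info` alive until
 * every CPU in the mask has finished. */
static void fake_call_function_many(void (*func)(void *), void *info)
{
	func(info);
}

int main(void)
{
	unsigned long vaddrs[2] = { 0x1000, 0x2000 };
	struct tlb_pending_info info = { .ctx = 0x42, .nr = 2, .vaddrs = vaddrs };

	fake_call_function_many(tlb_pending_func, &info);
	return 0;
}
```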
38 changes: 34 additions & 4 deletions trunk/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+	struct mm_struct *mm = tb->mm;
 
-	if (tb->tlb_nr) {
-		flush_tsb_user(tb);
+	if (!tb->tlb_nr)
+		goto out;
 
-		if (CTX_VALID(tb->mm->context)) {
+	flush_tsb_user(tb);
+
+	if (CTX_VALID(mm->context)) {
+		if (tb->tlb_nr == 1) {
+			global_flush_tlb_page(mm, tb->vaddrs[0]);
+		} else {
 #ifdef CONFIG_SMP
 			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
 					      &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
 					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		tb->tlb_nr = 0;
 	}
 
+	tb->tlb_nr = 0;
+
+out:
 	put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	if (tb->tlb_nr)
+		flush_tlb_pending();
+	tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 			      bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 		nr = 0;
 	}
 
+	if (!tb->active) {
+		global_flush_tlb_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr);
+		return;
+	}
+
 	if (nr == 0)
 		tb->mm = mm;
 
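This file is the heart of the change: the per-cpu batch now carries an active flag that is set only between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), and tlb_batch_add_one() falls back to an immediate one-page TLB and TSB flush whenever no batching window is open. A compact userspace model of that state machine is sketched below; the per-cpu storage, flush bodies, and TLB_BATCH_NR value are all stand-ins, and the real kernel code also flushes the TSB on both paths.

```c
/* Userspace sketch of the lazy-MMU batching lifecycle above. */
#include <stdio.h>

#define TLB_BATCH_NR 8	/* illustrative batch size */

struct tlb_batch {
	unsigned long active;
	unsigned long tlb_nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

static struct tlb_batch tlb_batch;	/* stands in for the per-cpu variable */

static void global_flush_tlb_page(unsigned long vaddr)
{
	printf("immediate flush: %#lx\n", vaddr);
}

static void flush_tlb_pending(void)
{
	for (unsigned long i = 0; i < tlb_batch.tlb_nr; i++)
		printf("batched flush: %#lx\n", tlb_batch.vaddrs[i]);
	tlb_batch.tlb_nr = 0;
}

static void arch_enter_lazy_mmu_mode(void)
{
	tlb_batch.active = 1;
}

static void arch_leave_lazy_mmu_mode(void)
{
	if (tlb_batch.tlb_nr)
		flush_tlb_pending();	/* drain anything still queued */
	tlb_batch.active = 0;
}

static void tlb_batch_add_one(unsigned long vaddr)
{
	if (!tlb_batch.active) {
		global_flush_tlb_page(vaddr);	/* no batching window open */
		return;
	}
	tlb_batch.vaddrs[tlb_batch.tlb_nr++] = vaddr;
	if (tlb_batch.tlb_nr >= TLB_BATCH_NR)
		flush_tlb_pending();		/* batch full: flush now */
}

int main(void)
{
	tlb_batch_add_one(0x1000);	/* outside lazy mode: immediate */
	arch_enter_lazy_mmu_mode();
	tlb_batch_add_one(0x2000);	/* batched */
	tlb_batch_add_one(0x3000);	/* batched */
	arch_leave_lazy_mmu_mode();	/* drains the batch */
	return 0;
}
```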
57 changes: 42 additions & 15 deletions trunk/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-			    unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+				  unsigned long hash_shift,
+				  unsigned long nentries)
 {
-	unsigned long i;
+	unsigned long tag, ent, hash;
 
-	for (i = 0; i < tb->tlb_nr; i++) {
-		unsigned long v = tb->vaddrs[i];
-		unsigned long tag, ent, hash;
+	v &= ~0x1UL;
+	hash = tsb_hash(v, hash_shift, nentries);
+	ent = tsb + (hash * sizeof(struct tsb));
+	tag = (v >> 22UL);
 
-		v &= ~0x1UL;
+	tsb_flush(ent, tag);
+}
 
-		hash = tsb_hash(v, hash_shift, nentries);
-		ent = tsb + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
+{
+	unsigned long i;
 
-		tsb_flush(ent, tag);
-	}
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+	}
+#endif
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
 
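The tsb.c refactor pulls the per-entry hash-and-flush logic out of the batch loop into __flush_tsb_one_entry() so that the new flush_tsb_user_page() can flush exactly one address, covering the huge-page TSB as well. The sketch below models the single-entry path in userspace; the tsb_hash() body and the 8K PAGE_SHIFT value follow the usual sparc64 conventions but should be read as illustrative, not as the kernel's exact code.

```c
/* Userspace sketch of the TSB single-entry flush factored out above:
 * hash the virtual address into the TSB, then flush that one entry. */
#include <stdio.h>

struct tsb {
	unsigned long tag;
	unsigned long pte;
};	/* 16 bytes, matching the sizeof(struct tsb) used in the diff */

static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
			      unsigned long nentries)
{
	/* Power-of-two table: index by the page number, masked to size. */
	return (vaddr >> hash_shift) & (nentries - 1);
}

static void tsb_flush(unsigned long ent, unsigned long tag)
{
	printf("flush TSB entry at %#lx (tag %#lx)\n", ent, tag);
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;			/* clear the low hint bit, as above */
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);		/* tag is the high bits of the vaddr */

	tsb_flush(ent, tag);
}

int main(void)
{
	static struct tsb fake_tsb[512];

	__flush_tsb_one_entry((unsigned long) fake_tsb, 0x7f0000001000UL,
			      13 /* 8K PAGE_SHIFT, illustrative */, 512);
	return 0;
}
```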
