Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 351207
b: refs/heads/master
c: 0fbebed
h: refs/heads/master
i:
  351205: a70b3de
  351203: ee12be6
  351199: 2428105
v: v3
  • Loading branch information
David S. Miller committed Feb 20, 2013
1 parent a532613 commit 6dab677
Show file tree
Hide file tree
Showing 7 changed files with 73 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: bcd896bae0166b4443503482a26ecf84d9ba60ab
refs/heads/master: 0fbebed682ff2788dee58e8d7f7dda46e33aa10b
1 change: 0 additions & 1 deletion trunk/arch/sparc/include/asm/hugetlb.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,

/* Arch hook invoked when a hugetlb region is about to be faulted in
 * for @mm.  Here it forwards to hugetlb_setup() — presumably to set up
 * the per-mm huge-page TSB state.
 * NOTE(review): hugetlb_setup()'s definition is not visible in this
 * fragment; confirm its contract (and note the surrounding diff shows
 * this call being removed by the commit, with setup deferred to the
 * first huge-page TLB miss instead).
 */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
hugetlb_setup(mm);
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
Expand Down
4 changes: 2 additions & 2 deletions trunk/arch/sparc/include/asm/page_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@
#ifndef __ASSEMBLY__

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
struct mm_struct;
extern void hugetlb_setup(struct mm_struct *mm);
struct pt_regs;
extern void hugetlb_setup(struct pt_regs *regs);
#endif

#define WANT_PAGE_VIRTUAL
Expand Down
39 changes: 35 additions & 4 deletions trunk/arch/sparc/kernel/tsb.S
Original file line number Diff line number Diff line change
Expand Up @@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath:
nop

/* It is a huge page, use huge page TSB entry address we
* calculated above.
* calculated above. If the huge page TSB has not been
* allocated, setup a trap stack and call hugetlb_setup()
* to do so, then return from the trap to replay the TLB
* miss.
*
* This is necessary to handle the case of transparent huge
* pages where we don't really have a non-atomic context
* in which to allocate the hugepage TSB hash table. When
* the 'mm' faults in the hugepage for the first time, we
* thus handle it here. This also makes sure that we can
* allocate the TSB hash table on the correct NUMA node.
*/
TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
cmp %g2, -1
movne %xcc, %g2, %g1
ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
cmp %g1, -1
bne,pt %xcc, 60f
nop

661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
nop
.previous

rdpr %tl, %g3
cmp %g3, 1
bne,pn %xcc, winfix_trampoline
nop
ba,pt %xcc, etrap
rd %pc, %g7
call hugetlb_setup
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop

60:
#endif

Expand Down
9 changes: 7 additions & 2 deletions trunk/arch/sparc/mm/fault_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -472,8 +472,13 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
else
hugetlb_setup(regs);

}
#endif
return;

Expand Down
24 changes: 19 additions & 5 deletions trunk/arch/sparc/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -2718,14 +2718,28 @@ static void context_reload(void *__data)
load_secondary_context(mm);
}

void hugetlb_setup(struct mm_struct *mm)
void hugetlb_setup(struct pt_regs *regs)
{
struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
struct mm_struct *mm = current->mm;
struct tsb_config *tp;

if (likely(tp->tsb != NULL))
return;
if (in_atomic() || !mm) {
const struct exception_table_entry *entry;

entry = search_exception_tables(regs->tpc);
if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
pr_alert("Unexpected HugeTLB setup in atomic context.\n");
die_if_kernel("HugeTSB in atomic", regs);
}

tp = &mm->context.tsb_block[MM_TSB_HUGE];
if (likely(tp->tsb == NULL))
tsb_grow(mm, MM_TSB_HUGE, 0);

tsb_grow(mm, MM_TSB_HUGE, 0);
tsb_context_switch(mm);
smp_tsb_sync(mm);

Expand Down
11 changes: 9 additions & 2 deletions trunk/arch/sparc/mm/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
mm->context.huge_pte_count++;
else
mm->context.huge_pte_count--;
if (mm->context.huge_pte_count == 1)
hugetlb_setup(mm);

/* Do not try to allocate the TSB hash table if we
* don't have one already. We have various locks held
* and thus we'll end up doing a GFP_KERNEL allocation
* in an atomic context.
*
* Instead, we let the first TLB miss on a hugepage
* take care of this.
*/
}

if (!pmd_none(orig)) {
Expand Down

0 comments on commit 6dab677

Please sign in to comment.