Commit dcc1e8d
[SPARC64]: Add a secondary TSB for hugepage mappings.
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored and committed by David S. Miller on Mar 22, 2006
1 parent 14778d9 · commit dcc1e8d
Showing 13 changed files with 462 additions and 201 deletions.
4 changes: 2 additions & 2 deletions arch/sparc64/Kconfig
@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
bool "4MB"

config HUGETLB_PAGE_SIZE_512K
-depends on !SPARC64_PAGE_SIZE_4MB
+depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
bool "512K"

config HUGETLB_PAGE_SIZE_64K
-depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
bool "64K"

endchoice
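The two added dependencies tighten the existing rule so that a hugepage size can only be selected when it is strictly larger than the kernel's base page size. For orientation only (this mapping is not part of the hunk above), each option ultimately selects one HPAGE_SHIFT value, which is the shift the new secondary TSB hashes on; a minimal C sketch, with the shift values inferred from the sizes in the option names, would be:

/* Illustrative only: mapping the hugepage-size Kconfig choice to a shift.
 * The numeric values follow from the sizes named in the options. */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT 22 /* 4MB  = 1 << 22 */
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HPAGE_SHIFT 19 /* 512K = 1 << 19 */
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT 16 /* 64K  = 1 << 16 */
#endif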
39 changes: 22 additions & 17 deletions arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
and TSB_PTR, 0x7, TMP1; \
mov 512, TMP2; \
andn TSB_PTR, 0x7, TSB_PTR; \
sllx TMP2, TMP1, TMP2; \
-srlx VADDR, PAGE_SHIFT, TMP1; \
+srlx VADDR, HASH_SHIFT, TMP1; \
sub TMP2, 1, TMP2; \
and TMP1, TMP2, TMP1; \
sllx TMP1, 4, TMP1; \
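The macro implements the pseudocode from the comment above; a rough C model (a hypothetical helper, not code from this patch) with the new HASH_SHIFT parameter looks like:

/* tsb_reg packs the TSB base address in its upper bits and a size field
 * in its low three bits (number of entries = 512 << size); each TSB
 * entry is 16 bytes (tag + PTE). */
static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
                                     unsigned long vaddr,
                                     unsigned long hash_shift)
{
        unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
        unsigned long tsb_base = tsb_reg & ~0x7UL;
        unsigned long tsb_index = (vaddr >> hash_shift) & index_mask;

        return tsb_base + (tsb_index * 16);
}

Passing the shift explicitly is what lets the same macro index the base-page TSB with PAGE_SHIFT and the new hugepage TSB with HPAGE_SHIFT, as the call sites below show.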
@@ -53,7 +53,7 @@ sun4v_itlb_miss:

LOAD_ITLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)

/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:

LOAD_DTLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)

/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:

/* fallthrough */

-/* Create TSB pointer into %g1. This is something like:
- *
- * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
- * tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
- * tsb_ptr = tsb_base + (tsb_index * 16);
- */
sun4v_tsb_miss_common:
-COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)

-/* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS
- * still in %g2, so it's quite trivial to get at the PGD PHYS value
- * so we can preload it into %g7.
- */
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2

+#ifdef CONFIG_HUGETLB_PAGE
+mov SCRATCHPAD_UTSBREG2, %g5
+ldxa [%g5] ASI_SCRATCHPAD, %g5
+cmp %g5, -1
+be,pt %xcc, 80f
+ nop
+COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+/* That clobbered %g2, reload it. */
+ldxa [%g0] ASI_SCRATCHPAD, %g2
+sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif

ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7

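In the hunk above, the new CONFIG_HUGETLB_PAGE block reads the second TSB register from SCRATCHPAD_UTSBREG2 and, unless it holds -1 (no hugepage TSB configured for this address space), hashes the faulting address into that TSB with HPAGE_SHIFT; the resulting pointer (or the -1 sentinel) is stashed at TRAP_PER_CPU_TSB_HUGE_TEMP for the fast path to pick up. A C-like sketch of that branch (illustrative only; compute_tsb_ptr() is the model shown earlier, and the HPAGE_SHIFT fallback is just a placeholder to keep the sketch self-contained):

#ifndef HPAGE_SHIFT
#define HPAGE_SHIFT 22 /* placeholder: assumes 4MB hugepages */
#endif

/* C model of COMPUTE_TSB_PTR, sketched after the first hunk above. */
unsigned long compute_tsb_ptr(unsigned long tsb_reg, unsigned long vaddr,
                              unsigned long hash_shift);

/* huge_tsb_reg is the SCRATCHPAD_UTSBREG2 value; tsb_huge_temp stands in
 * for the TRAP_PER_CPU_TSB_HUGE_TEMP slot. */
static void stash_huge_tsb_ptr(unsigned long huge_tsb_reg,
                               unsigned long vaddr,
                               unsigned long *tsb_huge_temp)
{
        if (huge_tsb_reg != -1UL)
                huge_tsb_reg = compute_tsb_ptr(huge_tsb_reg, vaddr, HPAGE_SHIFT);

        *tsb_huge_temp = huge_tsb_reg; /* 80: stx %g5, [%g2 + ...TSB_HUGE_TEMP] */
}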
21 changes: 20 additions & 1 deletion arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)

extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
(TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
(TRAP_PER_CPU_CPU_LIST_PA !=
-offsetof(struct trap_per_cpu, cpu_list_pa)))
+offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+(TRAP_PER_CPU_TSB_HUGE !=
+offsetof(struct trap_per_cpu, tsb_huge)) ||
+(TRAP_PER_CPU_TSB_HUGE_TEMP !=
+offsetof(struct trap_per_cpu, tsb_huge_temp)))
trap_per_cpu_offsets_are_bolixed_dave();

+if ((TSB_CONFIG_TSB !=
+offsetof(struct tsb_config, tsb)) ||
+(TSB_CONFIG_RSS_LIMIT !=
+offsetof(struct tsb_config, tsb_rss_limit)) ||
+(TSB_CONFIG_NENTRIES !=
+offsetof(struct tsb_config, tsb_nentries)) ||
+(TSB_CONFIG_REG_VAL !=
+offsetof(struct tsb_config, tsb_reg_val)) ||
+(TSB_CONFIG_MAP_VADDR !=
+offsetof(struct tsb_config, tsb_map_vaddr)) ||
+(TSB_CONFIG_MAP_PTE !=
+offsetof(struct tsb_config, tsb_map_pte)))
+tsb_config_offsets_are_bolixed_dave();

/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
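The new offsetof() checks extend the existing pattern: each hand-maintained assembly offset constant must match the layout of the corresponding C structure, and the *_offsets_are_bolixed_dave() functions are left undefined on purpose, so a mismatch surfaces as a link failure rather than a silently corrupted trap path. A stripped-down illustration of the idea (the struct below is a stand-in with assumed field types, not the real sparc64 definition):

#include <stddef.h>

struct tsb; /* real definition lives in the sparc64 headers */

/* Stand-in listing the six fields the new checks cover; only the field
 * order, and hence the offsets, matters to the assembly code. */
struct tsb_config_example {
        struct tsb *tsb;
        unsigned long tsb_rss_limit;
        unsigned long tsb_nentries;
        unsigned long tsb_reg_val;
        unsigned long tsb_map_vaddr;
        unsigned long tsb_map_pte;
};

extern void offsets_are_wrong(void); /* never defined: link error if referenced */

static void check_offsets(void)
{
        /* With optimization on, a matching constant folds the test away and
         * the undefined function is never referenced. The literal 8 plays the
         * role of an asm-offsets constant such as TSB_CONFIG_RSS_LIMIT. */
        if (8 != offsetof(struct tsb_config_example, tsb_rss_limit))
                offsets_are_wrong();
}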
(Diffs for the remaining 10 changed files are not shown here.)
