[SPARC64]: No need to D-cache color page tables any longer.
Unlike the virtual page tables, the new TSB scheme does not
require this ugly hack.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller authored and David S. Miller committed Mar 20, 2006
1 parent 74bf431 commit 05e28f9
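
Background for the diffs below: sparc64 caches freed page-table pages on a per-CPU quicklist, a singly linked list whose "next" link is stored in the first word of the free page itself. What follows is a minimal userspace sketch of that technique, not kernel code; the names mirror the patch, while PAGE_SIZE and aligned_alloc() are stand-ins for the kernel's 8K pages and page allocator.

/* Minimal userspace sketch of the quicklist technique: the first word
 * of a free page doubles as the "next" link of a singly linked list.
 */
#include <stdlib.h>

#define PAGE_SIZE 8192UL

static unsigned long *pte_quicklist;	/* list head */
static unsigned int pgtable_cache_size;	/* cached-page count */

static void free_pte_fast(void *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = pte;
	pgtable_cache_size++;
}

static void *pte_alloc_one_fast(void)
{
	unsigned long *ret = pte_quicklist;

	if (ret) {
		pte_quicklist = (unsigned long *) *ret;
		ret[0] = 0;	/* wipe the link word before reuse */
		pgtable_cache_size--;
	}
	return ret;
}

int main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	free_pte_fast(page);		/* push */
	free(pte_alloc_one_fast());	/* pop returns the same page */
	return 0;
}

The point of the patch is that one such list now suffices, where the old code kept pte_cache[0] and pte_cache[1] and picked between them by D-cache color.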
Showing 3 changed files with 55 additions and 122 deletions.
71 changes: 6 additions & 65 deletions arch/sparc64/mm/init.c
@@ -145,17 +145,19 @@ int bigkernel = 0;
 #define PGT_CACHE_LOW	25
 #define PGT_CACHE_HIGH	50
 
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct pgt_quicklists;
+#endif
+
 void check_pgt_cache(void)
 {
 	preempt_disable();
 	if (pgtable_cache_size > PGT_CACHE_HIGH) {
 		do {
 			if (pgd_quicklist)
 				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist[0])
-				free_pte_slow(pte_alloc_one_fast(NULL, 0));
-			if (pte_quicklist[1])
-				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+			if (pte_quicklist)
+				free_pte_slow(pte_alloc_one_fast());
 		} while (pgtable_cache_size > PGT_CACHE_LOW);
 	}
 	preempt_enable();
@@ -962,67 +964,6 @@ void get_new_mmu_context(struct mm_struct *mm)
 	spin_unlock(&ctx_alloc_lock);
 }
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
-
-/* XXX We don't need to color these things in the D-cache any longer. */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define DC_ALIAS_SHIFT	1
-#else
-#define DC_ALIAS_SHIFT	0
-#endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page;
-	unsigned long color;
-
-	{
-		pte_t *ptep = pte_alloc_one_fast(mm, address);
-
-		if (ptep)
-			return ptep;
-	}
-
-	color = VPTE_COLOR(address);
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
-	if (page) {
-		unsigned long *to_free;
-		unsigned long paddr;
-		pte_t *pte;
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		set_page_count(page, 1);
-		ClearPageCompound(page);
-
-		set_page_count((page + 1), 1);
-		ClearPageCompound(page + 1);
-#endif
-		paddr = (unsigned long) page_address(page);
-		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
-
-		if (!color) {
-			pte = (pte_t *) paddr;
-			to_free = (unsigned long *) (paddr + PAGE_SIZE);
-		} else {
-			pte = (pte_t *) (paddr + PAGE_SIZE);
-			to_free = (unsigned long *) paddr;
-		}
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		/* Now free the other one up, adjust cache size. */
-		preempt_disable();
-		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
-		pte_quicklist[color ^ 0x1] = to_free;
-		pgtable_cache_size++;
-		preempt_enable();
-#endif
-
-		return pte;
-	}
-	return NULL;
-}
-
 void sparc_ultra_dump_itlb(void)
 {
 	int slot;
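The pte_alloc_one_kernel() deleted above is the "ugly hack" of the commit message: with virtual page tables, a pte page could be reached through virtual addresses of differing D-cache color, so the allocator grabbed an order-1 (two-page) block, handed out the half whose color matched the VPTE address, and recycled the other half onto the opposite-color quicklist. Below is a compressed sketch of just that color selection, assuming the single alias bit of the deleted DCACHE_ALIASING_POSSIBLE code; pick_colored_half() is a hypothetical helper, not from the patch.

/* Sketch of the retired two-color selection. An order-1 block is two
 * consecutive pages, which always have opposite D-cache colors.
 */
#define PAGE_SHIFT 13			/* 8K pages on sparc64 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Color the virtual-page-table scheme demanded for this address. */
#define VPTE_COLOR(address)	(((address) >> (PAGE_SHIFT + 10)) & 1UL)

/* Hypothetical helper: return the half whose color matches; the
 * deleted code pushed the other half (to_free) onto the quicklist
 * of color ^ 1.
 */
static void *pick_colored_half(unsigned long paddr, unsigned long color)
{
	return (void *)(color ? paddr + PAGE_SIZE : paddr);
}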
5 changes: 3 additions & 2 deletions include/asm-sparc64/cpudata.h
@@ -20,8 +20,9 @@ typedef struct {
 	/* Dcache line 2 */
 	unsigned int	pgcache_size;
 	unsigned int	__pad1;
-	unsigned long	*pte_cache[2];
+	unsigned long	*pte_cache;
 	unsigned long	*pgd_cache;
+	unsigned long	__pad2;
 
 	/* Dcache line 3, rarely used */
 	unsigned int	dcache_size;
@@ -30,8 +31,8 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned int	__pad2;
 	unsigned int	__pad3;
+	unsigned int	__pad4;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
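The cpudata.h hunks keep the struct's cache-line layout stable: pte_cache[2] was 16 bytes, and the new pte_cache pointer plus an unsigned long __pad2 are the same 16, so "Dcache line 2" keeps its size and the later fields keep their offsets (the old unsigned int __pad2 in line 3 becomes __pad4 only because the name is now taken). A compile-time check of that arithmetic, assuming LP64 types and the 32-byte lines the "Dcache line" comments imply:

/* Layout check for the reworked "Dcache line 2" group. Assumes LP64
 * (4-byte int, 8-byte long and pointer); the 32-byte target is an
 * assumption read off the "Dcache line" comments in cpudata.h.
 */
struct dcache_line2 {
	unsigned int	pgcache_size;	/*  4 bytes */
	unsigned int	__pad1;		/*  4 bytes */
	unsigned long	*pte_cache;	/*  8 bytes, was pte_cache[2] */
	unsigned long	*pgd_cache;	/*  8 bytes */
	unsigned long	__pad2;		/*  8 bytes, keeps the old size */
};

_Static_assert(sizeof(struct dcache_line2) == 32,
	       "line 2 still spans exactly 32 bytes");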
101 changes: 46 additions & 55 deletions include/asm-sparc64/pgalloc.h
@@ -19,16 +19,15 @@
 #else
 extern struct pgtable_cache_struct {
 	unsigned long *pgd_cache;
-	unsigned long *pte_cache[2];
+	unsigned long *pte_cache;
 	unsigned int pgcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
 #define pte_quicklist		(pgt_quicklists.pte_cache)
 #define pgtable_cache_size	(pgt_quicklists.pgcache_size)
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -56,66 +55,52 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
 
-/* XXX This crap can die, no longer using virtual page tables... */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address)		0
-#define DCACHE_COLOR(address)		0
-#endif
-
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one_fast(void)
 {
 	unsigned long *ret;
-	int color = 0;
 
 	preempt_disable();
-	if (pte_quicklist[color] == NULL)
-		color = 1;
-
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pmd_t *)ret;
+	return (pmd_t *) ret;
 }
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = pmd_alloc_one_fast(mm, address);
-	if (!pmd) {
+	pmd = pmd_alloc_one_fast();
+	if (unlikely(!pmd)) {
 		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 		if (pmd)
 			memset(pmd, 0, PAGE_SIZE);
 	}
 	return pmd;
 }
 
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void free_pmd_fast(pmd_t *pmd)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
 	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pmd;
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pmd;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pmd_slow(pmd_t *pmd)
+static inline void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
@@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
 #define pmd_populate(MM,PMD,PTE_PAGE)	\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, addr);
-
-	if (pte)
-		return virt_to_page(pte);
-
-	return NULL;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(void)
 {
-	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
 
 	preempt_disable();
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
-	return (pte_t *)ret;
+
+	return (pte_t *) ret;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *ptep = pte_alloc_one_fast();
+
+	if (likely(ptep))
+		return ptep;
+
+	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
-static __inline__ void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pte);
+	pte_t *pte = pte_alloc_one_fast();
 
+	if (likely(pte))
+		return virt_to_page(pte);
+
+	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+}
+
+static inline void free_pte_fast(pte_t *pte)
+{
 	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pte;
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pte_slow(pte_t *pte)
+static inline void free_pte_slow(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
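For how these quicklists stay bounded, see check_pgt_cache() in the first hunk: trimming uses hysteresis, kicking in only past PGT_CACHE_HIGH (50 cached pages) and then draining down to PGT_CACHE_LOW (25), so it does not run on every free. A standalone sketch of that pattern; quicklist_pop() and page_release() are hypothetical stand-ins for the patch's pte_alloc_one_fast()/free_pte_slow():

#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50

extern unsigned int pgtable_cache_size;	/* pages currently cached */
extern void *quicklist_pop(void);	/* assumed: pops one page and
					 * decrements pgtable_cache_size */
extern void page_release(void *page);	/* assumed: back to the allocator */

/* Trim with hysteresis, modeled on check_pgt_cache(): do nothing until
 * the cache overshoots the high watermark, then drain to the low one in
 * a single pass.
 */
void check_pgt_cache(void)
{
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			page_release(quicklist_pop());
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
}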
