xtensa: use buddy allocator for PTE table
At the moment xtensa uses the slab allocator for PTE tables.  That doesn't
work with split page table lock enabled: slab uses page->slab_cache and
page->first_page for its pages, and these fields share storage with
page->ptl.
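
For context, a simplified sketch of the union in struct page behind that
conflict (members elided; see include/linux/mm_types.h around v3.13 for the
real definition).  Because ptl occupies the same storage as slab's
bookkeeping fields, a page owned by slab cannot also carry a split page
table lock:

	struct page {
		/* ... */
		union {
			unsigned long private;
	#if USE_SPLIT_PTE_PTLOCKS
			spinlock_t ptl;			/* split page table lock */
	#endif
			struct kmem_cache *slab_cache;	/* SL[AU]B: pointer to slab */
			struct page *first_page;	/* compound tail pages */
		};
		/* ... */
	};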

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Kirill A. Shutemov authored and Linus Torvalds committed Nov 15, 2013
Parent: 01058e7 · Commit: f820e28
Showing 3 changed files with 13 additions and 30 deletions.
arch/xtensa/include/asm/pgalloc.h (12 additions, 8 deletions)
@@ -38,14 +38,18 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	free_page((unsigned long)pgd);
 }
 
-/* Use a slab cache for the pte pages (see also sparc64 implementation) */
-
-extern struct kmem_cache *pgtable_cache;
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					 unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
+	pte_t *ptep;
+	int i;
+
+	ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!ptep)
+		return NULL;
+	for (i = 0; i < 1024; i++)
+		pte_clear(NULL, 0, ptep + i);
+	return ptep;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -59,21 +63,21 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 		return NULL;
 	page = virt_to_page(pte);
 	if (!pgtable_page_ctor(page)) {
-		kmem_cache_free(pgtable_cache, pte);
+		__free_page(page);
 		return NULL;
 	}
 	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	pgtable_page_dtor(pte);
-	kmem_cache_free(pgtable_cache, page_address(pte));
+	__free_page(pte);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
arch/xtensa/include/asm/pgtable.h (1 addition, 2 deletions)
@@ -220,12 +220,11 @@ extern unsigned long empty_zero_page[1024];
 #ifdef CONFIG_MMU
 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 extern void paging_init(void);
-extern void pgtable_cache_init(void);
 #else
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
-static inline void pgtable_cache_init(void) { }
 #endif
+static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
arch/xtensa/mm/mmu.c (0 additions, 20 deletions)
@@ -50,23 +50,3 @@ void __init init_mmu(void)
 	 */
 	set_ptevaddr_register(PGTABLE_START);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void *addr)
-{
-	pte_t *ptep = (pte_t *)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-			PAGE_SIZE, PAGE_SIZE,
-			SLAB_HWCACHE_ALIGN,
-			pgd_ctor);
-}
