powerpc/mm/nohash: Remove pte fragment dependency from nohash
Now that we have removed 64K page size support, the RCU page table free can
be much simpler for nohash. Make a copy of the RCU callback in the pgalloc.h
header, similar to nohash 32. We could possibly merge the 32-bit and 64-bit
versions there, but that is left for a later patch.

We also move the book3s-specific handler to pgtable_book3s64.c. This will be
updated in a later patch to handle the split PMD ptlock.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Aneesh Kumar K.V authored and Michael Ellerman committed May 15, 2018
1 parent: 7820856, commit: 7023467
Showing 3 changed files with 159 additions and 126 deletions.
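
The mechanism at the heart of the patch is pointer tagging: page tables are at
least page aligned, so pgtable_free_tlb() can fold the table's cache index into
the low bits of the pointer before queueing it for a deferred free, and
__tlb_remove_table() unpacks both halves once the grace period has elapsed.
Below is a minimal standalone sketch of that round trip; the 0xf value for
MAX_PGTABLE_INDEX_SIZE and the names queue_table_free()/remove_table_callback()
are assumptions for illustration, not kernel API.

/* Sketch only, not kernel code: the tag/defer/untag round trip. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* assumed low-bit tag mask */

static void pgtable_free(void *table, int shift)
{
	if (!shift)
		printf("free_page(%p)\n", table);	/* a PTE page */
	else
		printf("kmem_cache_free(PGT_CACHE(%d), %p)\n", shift, table);
}

/* Decode side, as __tlb_remove_table() does: mask the tag back out. */
static void remove_table_callback(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}

/* Encode side, as pgtable_free_tlb() does: the table is page aligned,
 * so the low four bits are free to carry the cache index. */
static void queue_table_free(void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	assert(shift <= MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	/* in the kernel, an RCU grace period elapses before the callback */
	remove_table_callback((void *)pgf);
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page-aligned dummy */

	queue_table_free(table, 0);	/* shift 0: PTE page */
	queue_table_free(table, 9);	/* shift > 0: e.g. a PMD cache index */
	free(table);
	return 0;
}

Shift 0 is reserved for PTE pages; every other shift selects a PGT_CACHE kmem
cache. This is the encoding the pgalloc.h hunk below copies for nohash.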
arch/powerpc/include/asm/nohash/64/pgalloc.h (57 changes: 45 additions, 12 deletions)
@@ -84,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,

 #define pmd_pgtable(pmd) pmd_page(pmd)
 
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+				pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -118,26 +130,47 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+static inline void pgtable_free(void *table, int shift)
+{
+	if (!shift) {
+		pgtable_page_dtor(table);
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
 
 #ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
-				  unsigned long address)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	tlb_flush_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
 }
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void __tlb_remove_table(void *_table)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				pgtable_gfp_flags(mm, GFP_KERNEL));
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	tlb_flush_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, page_address(table), 0);
+}
 
 #define __pmd_free_tlb(tlb, pmd, addr) \
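
The shift == 0 case above is where the pte fragment dependency disappears for
nohash: a PTE table is now a whole page, freed with pgtable_page_dtor() and
free_page(), while the book3s64 callback that follows keeps routing shift-0
tables through pte_fragment_free(). A compressed, illustration-only contrast,
with printf stand-ins rather than kernel calls:

/* Illustration only: stand-ins, not kernel functions. */
#include <stdio.h>

static void nohash64_free_pte_table(void *table)
{
	/* nohash 64: a PTE table is a full page, freed immediately */
	printf("pgtable_page_dtor + free_page(%p)\n", table);
}

static void book3s64_free_pte_table(void *table)
{
	/* book3s64: a PTE table is one fragment of a shared page; the
	 * page is returned only when the last fragment reference drops */
	printf("pte_fragment_free(%p)\n", table);
}

int main(void)
{
	int pte_table;	/* placeholder standing in for a table */

	nohash64_free_pte_table(&pte_table);
	book3s64_free_pte_table(&pte_table);
	return 0;
}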
arch/powerpc/mm/pgtable-book3s64.c (114 changes: 114 additions, 0 deletions)
@@ -225,3 +225,117 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#ifdef CONFIG_PPC_64K_PAGES
+static pte_t *get_pte_from_cache(struct mm_struct *mm)
+{
+	void *pte_frag, *ret;
+
+	spin_lock(&mm->page_table_lock);
+	ret = mm->context.pte_frag;
+	if (ret) {
+		pte_frag = ret + PTE_FRAG_SIZE;
+		/*
+		 * If we have taken up all the fragments mark PTE page NULL
+		 */
+		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+			pte_frag = NULL;
+		mm->context.pte_frag = pte_frag;
+	}
+	spin_unlock(&mm->page_table_lock);
+	return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+{
+	void *ret = NULL;
+	struct page *page;
+
+	if (!kernel) {
+		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+		if (!page)
+			return NULL;
+		if (!pgtable_page_ctor(page)) {
+			__free_page(page);
+			return NULL;
+		}
+	} else {
+		page = alloc_page(PGALLOC_GFP);
+		if (!page)
+			return NULL;
+	}
+
+	ret = page_address(page);
+	spin_lock(&mm->page_table_lock);
+	/*
+	 * If we find pgtable_page set, we return
+	 * the allocated page with single fragment
+	 * count.
+	 */
+	if (likely(!mm->context.pte_frag)) {
+		set_page_count(page, PTE_FRAG_NR);
+		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return (pte_t *)ret;
+}
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+	pte_t *pte;
+
+	pte = get_pte_from_cache(mm);
+	if (pte)
+		return pte;
+
+	return __alloc_for_ptecache(mm, kernel);
+}
+
+#endif /* CONFIG_PPC_64K_PAGES */
+
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+	struct page *page = virt_to_page(table);
+
+	if (put_page_testzero(page)) {
+		if (!kernel)
+			pgtable_page_dtor(page);
+		free_unref_page(page);
+	}
+}
+
+#ifdef CONFIG_SMP
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	if (!shift)
+		/* PTE page needs special handling */
+		pte_fragment_free(table, 0);
+	else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+#else
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	if (!shift) {
+		/* PTE page needs special handling */
+		pte_fragment_free(table, 0);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+#endif
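
To see what pte_fragment_alloc() above is doing, here is a userspace sketch of
the fragment hand-out implemented by get_pte_from_cache() and
__alloc_for_ptecache(), with the locking and page refcounting elided; the 64K
page carved into sixteen 4K fragments (PTE_FRAG_NR = 16) is an assumption
matching the 64K-page configuration this code serves.

/* Sketch only, not kernel code: the fragment hand-out without locking
 * or refcounting. Sizes are assumed: 64K page, 4K fragments. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_PAGE_SIZE	65536	/* assumed 64K page */
#define PTE_FRAG_SIZE	4096	/* assumed 4K fragment */
#define PTE_FRAG_NR	(FRAG_PAGE_SIZE / PTE_FRAG_SIZE)

struct mm_ctx {
	void *pte_frag;	/* next unused fragment, like mm->context.pte_frag */
};

/* Hand out the next cached fragment, as get_pte_from_cache() does. */
static void *frag_alloc(struct mm_ctx *mm)
{
	void *ret = mm->pte_frag;

	if (ret) {
		void *next = (char *)ret + PTE_FRAG_SIZE;

		/* all fragments used once we hit the next page boundary */
		if (((uintptr_t)next & (FRAG_PAGE_SIZE - 1)) == 0)
			next = NULL;
		mm->pte_frag = next;
	}
	return ret;
}

int main(void)
{
	struct mm_ctx mm = { 0 };
	void *page = aligned_alloc(FRAG_PAGE_SIZE, FRAG_PAGE_SIZE);

	/* a fresh page: take fragment 0, cache the rest, as
	 * __alloc_for_ptecache() does */
	mm.pte_frag = (char *)page + PTE_FRAG_SIZE;
	printf("frag  0 at %p\n", page);

	for (int i = 1; i < PTE_FRAG_NR; i++)
		printf("frag %2d at %p\n", i, frag_alloc(&mm));

	printf("cache empty: %s\n", frag_alloc(&mm) ? "no" : "yes");
	free(page);
	return 0;
}

Once the last fragment is handed out, the cached pointer reaches the page
boundary and is cleared, so the next request falls through to
__alloc_for_ptecache() for a fresh page.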
arch/powerpc/mm/pgtable_64.c (114 changes: 0 additions, 114 deletions)
@@ -313,120 +313,6 @@ struct page *pmd_page(pmd_t pmd)
 	return virt_to_page(pmd_page_vaddr(pmd));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static pte_t *get_pte_from_cache(struct mm_struct *mm)
-{
-	void *pte_frag, *ret;
-
-	spin_lock(&mm->page_table_lock);
-	ret = mm->context.pte_frag;
-	if (ret) {
-		pte_frag = ret + PTE_FRAG_SIZE;
-		/*
-		 * If we have taken up all the fragments mark PTE page NULL
-		 */
-		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
-			pte_frag = NULL;
-		mm->context.pte_frag = pte_frag;
-	}
-	spin_unlock(&mm->page_table_lock);
-	return (pte_t *)ret;
-}
-
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
-{
-	void *ret = NULL;
-	struct page *page;
-
-	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
-		}
-	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
-			return NULL;
-	}
-
-	ret = page_address(page);
-	spin_lock(&mm->page_table_lock);
-	/*
-	 * If we find pgtable_page set, we return
-	 * the allocated page with single fragment
-	 * count.
-	 */
-	if (likely(!mm->context.pte_frag)) {
-		set_page_count(page, PTE_FRAG_NR);
-		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	return (pte_t *)ret;
-}
-
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
-{
-	pte_t *pte;
-
-	pte = get_pte_from_cache(mm);
-	if (pte)
-		return pte;
-
-	return __alloc_for_ptecache(mm, kernel);
-}
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-void pte_fragment_free(unsigned long *table, int kernel)
-{
-	struct page *page = virt_to_page(table);
-	if (put_page_testzero(page)) {
-		if (!kernel)
-			pgtable_page_dtor(page);
-		free_unref_page(page);
-	}
-}
-
-#ifdef CONFIG_SMP
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	unsigned long pgf = (unsigned long)table;
-
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
-	tlb_remove_table(tlb, (void *)pgf);
-}
-
-void __tlb_remove_table(void *_table)
-{
-	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
-	if (!shift)
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
-}
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	if (!shift) {
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	} else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
-}
-#endif
-
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void)
 {
