powerpc/mm: refactor definition of pgtable_cache[]
pgtable_cache[] is the same for the four subarches, so let's make it common.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Christophe Leroy authored and Michael Ellerman committed May 2, 2019
1 parent dc09686 commit e80789a
Showing 5 changed files with 21 additions and 86 deletions.
21 changes: 0 additions & 21 deletions arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -5,26 +5,6 @@
 #include <linux/threads.h>
 #include <linux/slab.h>
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -69,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 	}
 }
 
-#define check_pgt_cache() do { } while (0)
 #define get_hugepd_cache_index(x) (x)
 
 #ifdef CONFIG_SMP
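The comment block removed above (and re-added once in asm/pgalloc.h at the end of this commit) pins down the sizing rule: a table with a given index_size occupies 2^index_size pointers and is drawn from the kmem_cache returned by PGT_CACHE(index_size). A minimal, hypothetical sketch of how such caches could be created (the function name and the exact kmem_cache_create() arguments are illustrative assumptions, not code from this patch; the kernel's real initializer lives under arch/powerpc/mm):

/* Hypothetical sketch: populating the caches behind PGT_CACHE(). */
#include <linux/slab.h>

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];

void pgtable_cache_add_sketch(unsigned int shift)
{
	/*
	 * A table of 2^shift pointers; aligning objects to their own
	 * size keeps the low bits of every table pointer clear, as the
	 * comment above requires.
	 */
	size_t size = sizeof(void *) << shift;

	if (!shift || shift > MAX_PGTABLE_INDEX_SIZE || pgtable_cache[shift])
		return;
	pgtable_cache[shift] = kmem_cache_create("pgtable-cache", size, size,
						 0, NULL);
}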
22 changes: 0 additions & 22 deletions arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -19,26 +19,6 @@ struct vmemmap_backing {
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
@@ -199,8 +179,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
 
-#define check_pgt_cache() do { } while (0)
-
 extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
 static inline void update_page_count(int psize, long count)
 {
21 changes: 0 additions & 21 deletions arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -5,26 +5,6 @@
 #include <linux/threads.h>
 #include <linux/slab.h>
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -87,7 +67,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 	}
 }
 
-#define check_pgt_cache() do { } while (0)
 #define get_hugepd_cache_index(x) (x)
 
 #ifdef CONFIG_SMP
22 changes: 0 additions & 22 deletions arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -18,26 +18,6 @@ struct vmemmap_backing {
 };
 extern struct vmemmap_backing *vmemmap_list;
 
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -140,6 +120,4 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 #define __pud_free_tlb(tlb, pud, addr) \
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
 
-#define check_pgt_cache() do { } while (0)
-
 #endif /* _ASM_POWERPC_PGALLOC_64_H */
21 changes: 21 additions & 0 deletions arch/powerpc/include/asm/pgalloc.h
@@ -45,6 +45,27 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	pte_fragment_free((unsigned long *)ptepage, 0);
 }
 
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages, the allocation size will be
+ * (2^index_size * sizeof(pointer)) and allocations are drawn from
+ * the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) pgtable_cache[shift]
+
+static inline void check_pgt_cache(void) { }
+
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else
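The "also used as a mask" sentence in the block just added is the load-bearing constraint: because every table is aligned so its low bits are zero, an index_size up to MAX_PGTABLE_INDEX_SIZE can be carried in the pointer itself. A self-contained, user-space sketch of that packing trick (pack_table and unpack_table are illustrative names, not kernel API; powerpc's pgtable_free_tlb() path uses essentially this pattern when deferring frees):

/* Illustrative sketch of the low-bits packing trick described above. */
#include <assert.h>
#include <stdint.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* one less than a power of two */

static uintptr_t pack_table(void *table, unsigned int index_size)
{
	/* Alignment guarantees the low 4 bits of the pointer are zero. */
	assert(((uintptr_t)table & MAX_PGTABLE_INDEX_SIZE) == 0);
	assert(index_size <= MAX_PGTABLE_INDEX_SIZE);
	return (uintptr_t)table | index_size;
}

static void *unpack_table(uintptr_t packed, unsigned int *index_size)
{
	/* The constant works as a mask precisely because it is 2^n - 1. */
	*index_size = packed & MAX_PGTABLE_INDEX_SIZE;
	return (void *)(packed & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}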
