powerpc/mm: Add radix pgalloc details
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Aneesh Kumar K.V authored and Michael Ellerman committed May 11, 2016
1 parent 934828e commit a2f41eb
Showing 5 changed files with 55 additions and 7 deletions.
34 changes: 30 additions & 4 deletions arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -50,19 +50,45 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
#endif

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
return (pgd_t *)__get_free_page(PGALLOC_GFP);
#else
struct page *page;
page = alloc_pages(PGALLOC_GFP, 4);
if (!page)
return NULL;
return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
free_page((unsigned long)pgd);
#else
free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (radix_enabled())
return radix__pgd_alloc(mm);
return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
if (radix_enabled())
return radix__pgd_free(mm, pgd);
kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
pgd_set(pgd, __pgtable_ptr_val(pud));
pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -78,7 +104,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pud_set(pud, __pgtable_ptr_val(pmd));
pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
@@ -107,13 +133,13 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
pmd_set(pmd, __pgtable_ptr_val(pte));
pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte_page)
{
pmd_set(pmd, __pgtable_ptr_val(pte_page));
pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
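For context on the order-4 allocation above: the hash PGD keeps coming from PGT_CACHE(PGD_INDEX_SIZE), but the radix PGD is bigger and is taken straight from the page allocator instead. A minimal userspace sketch of the sizing, assuming RADIX_PGD_INDEX_SIZE is 13 and sizeof(pgd_t) is 8 (both assumptions for illustration, not values read off this diff):

#include <stdio.h>

int main(void)
{
	/* assumed values, standing in for RADIX_PGD_INDEX_SIZE and sizeof(pgd_t) */
	unsigned long radix_pgd_index_size = 13;
	unsigned long pgd_entry_bytes = 8;
	unsigned long pgd_bytes = (1UL << radix_pgd_index_size) * pgd_entry_bytes;

	/* with 64K kernel pages the whole PGD fits in one page: __get_free_page() */
	printf("64K pages needed: %lu\n", pgd_bytes / (64 * 1024));
	/* with 4K kernel pages it needs 16 contiguous pages: alloc_pages(gfp, 4) */
	printf("4K pages needed:  %lu\n", pgd_bytes / (4 * 1024));
	return 0;
}

Under those assumptions the PGD is 64KB, i.e. 16 contiguous 4K pages, and 16 is 2^4, which is why the !CONFIG_PPC_64K_PAGES branch passes order 4 to alloc_pages().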
10 changes: 8 additions & 2 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -170,11 +170,17 @@ extern unsigned long __pgd_table_size;
#define PMD_TABLE_SIZE __pmd_table_size
#define PUD_TABLE_SIZE __pud_table_size
#define PGD_TABLE_SIZE __pgd_table_size

extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS __pmd_val_bits
#define PUD_VAL_BITS __pud_val_bits
#define PGD_VAL_BITS __pgd_val_bits
/*
 * Pgtable size used by swapper, initialized in asm code.
 * We will switch this later to the radix PGD.
 */
#define MAX_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)
#define MAX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
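The new PMD/PUD/PGD_VAL_BITS let the populate helpers OR extra bits into a non-leaf entry: on radix the entry has to carry more than a bare table pointer (a valid bit plus the size of the next-level table), while hash leaves the bits at zero so its stores are unchanged. A rough, self-contained sketch of that composition; the bit layout and values here are assumptions for illustration, the real RADIX_*_VAL_BITS definitions live elsewhere in the tree:

#include <stdint.h>
#include <stdio.h>

/* assumed layout: a valid bit in the MSB, next-level index size in the low bits */
#define FAKE_VALID_BIT     0x8000000000000000UL
#define FAKE_PMD_VAL_BITS  (FAKE_VALID_BIT | 9UL)	/* 9 = assumed next-level index size */

/* mirrors pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS) */
static uint64_t make_pmd_entry(uint64_t pte_table, uint64_t val_bits)
{
	return pte_table | val_bits;
}

int main(void)
{
	uint64_t pte_table = 0x12340000UL;	/* made-up next-level table address */

	/* hash: __pmd_val_bits == 0, so the entry is just the pointer, as before */
	printf("hash  pmd entry: %#018llx\n",
	       (unsigned long long)make_pmd_entry(pte_table, 0));
	/* radix: the valid bit and next-level size are folded into the entry */
	printf("radix pmd entry: %#018llx\n",
	       (unsigned long long)make_pmd_entry(pte_table, FAKE_PMD_VAL_BITS));
	return 0;
}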
7 changes: 7 additions & 0 deletions arch/powerpc/mm/hash_utils_64.c
@@ -882,6 +882,13 @@ void __init hash__early_init_mmu(void)
__pmd_table_size = H_PMD_TABLE_SIZE;
__pud_table_size = H_PUD_TABLE_SIZE;
__pgd_table_size = H_PGD_TABLE_SIZE;
/*
 * 4K hash uses the hugepd format, so set them to
 * zero
 */
__pmd_val_bits = 0;
__pud_val_bits = 0;
__pgd_val_bits = 0;
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
5 changes: 4 additions & 1 deletion arch/powerpc/mm/pgtable-radix.c
@@ -324,8 +324,11 @@ void __init radix__early_init_mmu(void)
__pud_table_size = RADIX_PUD_TABLE_SIZE;
__pgd_table_size = RADIX_PGD_TABLE_SIZE;

radix_init_page_sizes();
__pmd_val_bits = RADIX_PMD_VAL_BITS;
__pud_val_bits = RADIX_PUD_VAL_BITS;
__pgd_val_bits = RADIX_PGD_VAL_BITS;

radix_init_page_sizes();
if (!firmware_has_feature(FW_FEATURE_LPAR))
radix_init_partition_table();

6 changes: 6 additions & 0 deletions arch/powerpc/mm/pgtable_64.c
@@ -91,6 +91,12 @@ unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);

#endif
unsigned long ioremap_bot = IOREMAP_BASE;