powerpc/mm: Make some of the PGTABLE_RANGE dependency explicit
The slice array size and the slice mask size depend on PGTABLE_RANGE.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Aneesh Kumar K.V authored and Benjamin Herrenschmidt committed Sep 17, 2012
1 parent f033d65 commit 78f1dbd
Showing 8 changed files with 47 additions and 36 deletions.
15 changes: 10 additions & 5 deletions arch/powerpc/include/asm/mmu-hash64.h
@@ -15,6 +15,13 @@
#include <asm/asm-compat.h>
#include <asm/page.h>

/*
* This is necessary to get the definition of PGTABLE_RANGE which we
* need for various slices related matters. Note that this isn't the
* complete pgtable.h but only a portion of it.
*/
#include <asm/pgtable-ppc64.h>

/*
* Segment table
*/
@@ -414,6 +421,8 @@ extern void slb_set_size(u16 size);
srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
add rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

@@ -458,11 +467,7 @@ typedef struct {

#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
/*
* Right now we support 64TB and 4 bits for each
* 1TB slice we need 32 bytes for 64TB.
*/
unsigned char high_slices_psize[32]; /* 4 bits per slice for now */
unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
u16 sllp; /* SLB page size encoding */
#endif
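For context on the new define: one slice per 1TB means PGTABLE_RANGE >> 40 slices, and at 4 bits (half a byte) per slice the psize array needs half that many bytes, hence the shift by 41. A minimal standalone sketch of that arithmetic, with the 64TB range of the time hard-coded for illustration (not part of the patch):

#include <assert.h>

int main(void)
{
        unsigned long pgtable_range = 1UL << 46;          /* 64TB */
        unsigned long slices = pgtable_range >> 40;       /* one slice per 1TB */
        unsigned long array_bytes = pgtable_range >> 41;  /* 4 bits per slice = half a byte */

        assert(slices == 64);
        assert(array_bytes == slices / 2);
        assert(array_bytes == 32);  /* matches the old hard-coded high_slices_psize[32] */
        return 0;
}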
9 changes: 9 additions & 0 deletions arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
extern u64 ppc64_rma_size;
#endif /* CONFIG_PPC64 */

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

/* The kernel use the constants below to index in the page sizes array.
12 changes: 8 additions & 4 deletions arch/powerpc/include/asm/page_64.h
@@ -78,14 +78,18 @@ extern u64 ppc64_pft_size;
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)

/*
* 1 bit per slice and we have one slice per 1TB
* Right now we support only 64TB.
* IF we change this we will have to change the type
* of high_slices
*/
#define SLICE_MASK_SIZE 8

#ifndef __ASSEMBLY__

struct slice_mask {
u16 low_slices;
/*
* This should be derived out of PGTABLE_RANGE. For the current
* max 64TB, u64 should be ok.
*/
u64 high_slices;
};

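SLICE_MASK_SIZE encodes the same geometry at 1 bit per slice: 64 one-terabyte slices need 64 bits, i.e. 8 bytes, which is why the u64 high_slices field remains sufficient at the current 64TB limit. A quick illustrative check (standalone sketch, not part of the patch):

#include <assert.h>

int main(void)
{
        unsigned long slices = (1UL << 46) >> 40;          /* 64 slices of 1TB in a 64TB range */
        unsigned long mask_bytes = slices / 8;             /* 1 bit per slice */

        assert(mask_bytes == 8);                           /* SLICE_MASK_SIZE */
        assert(mask_bytes == sizeof(unsigned long long));  /* a u64 still covers it */
        return 0;
}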
17 changes: 2 additions & 15 deletions arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)


/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

/*
* Define the address range of the kernel non-linear virtual area
*/
@@ -117,9 +106,6 @@

#ifndef __ASSEMBLY__

#include <linux/stddef.h>
#include <asm/tlbflush.h>

/*
* This is the default implementation of various PTE accessors, it's
* used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
10 changes: 2 additions & 8 deletions arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@

struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
3 changes: 0 additions & 3 deletions arch/powerpc/include/asm/tlbflush.h
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
12 changes: 11 additions & 1 deletion arch/powerpc/mm/pgtable_64.c
@@ -55,8 +55,18 @@

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;
/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static void *early_alloc_pgtable(unsigned long size)
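The two #error checks moved here bound TASK_SIZE_USER64 by the page-table range and by the user VSID coverage, 2^(USER_ESID_BITS + SID_SHIFT) bytes. A hypothetical run-time version of the same comparisons, with SID_SHIFT and USER_ESID_BITS values assumed for illustration rather than taken from the patch:

#include <assert.h>

/* illustrative values only; the real constants live in asm/mmu-hash64.h */
#define SID_SHIFT       28      /* 256MB segments */
#define USER_ESID_BITS  18      /* assumed, giving a 64TB user VSID range */

int main(void)
{
        unsigned long task_size_user64 = 1UL << 46;     /* 64TB */
        unsigned long pgtable_range    = 1UL << 46;
        unsigned long vsid_range       = 1UL << (USER_ESID_BITS + SID_SHIFT);

        assert(task_size_user64 <= pgtable_range);      /* first #error check */
        assert(task_size_user64 <= vsid_range);         /* second #error check */
        return 0;
}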
5 changes: 5 additions & 0 deletions arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
#include <asm/mmu.h>
#include <asm/spu.h>

/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif

static DEFINE_SPINLOCK(slice_convert_lock);


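The shift by 43 in the new check is the byte count of a 1-bit-per-1TB-slice mask: (PGTABLE_RANGE >> 40) slices divided by 8 bits per byte. A short standalone confirmation of that shift (illustrative only):

#include <assert.h>

int main(void)
{
        unsigned long pgtable_range = 1UL << 46;               /* 64TB */
        unsigned long slice_mask_size = 8;                     /* bytes, per page_64.h */
        unsigned long mask_bytes = (pgtable_range >> 40) / 8;  /* 1 bit per slice */

        assert(mask_bytes == (pgtable_range >> 43));
        assert(mask_bytes <= slice_mask_size);          /* what the #if in slice.c enforces */
        return 0;
}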
