powerpc/nohash: Remove hash related code from nohash headers.
When the nohash and book3s headers were split, some hash-related code
remained in the nohash headers. This patch removes it.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Duplicate pte_young() to avoid circular header dependency]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Christophe Leroy authored and Michael Ellerman committed May 7, 2018
1 parent 722cde7 commit 45201c8
Showing 4 changed files with 20 additions and 75 deletions.
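The bracketed [mpe: ...] note above is about include ordering: asm/nohash/pgtable.h pulls in asm/nohash/64/pgtable.h (or the 32-bit variant) near its top and only afterwards defined the shared pte_young(). Since __ptep_test_and_clear_young() in the 64-bit header now calls pte_young(), the helper must live in the low-level headers themselves, hence the duplicated definition in two of the diffs below. A sketch of that ordering (illustrative comment only; the header paths are real):

    /* asm/nohash/pgtable.h -- sketch, not the full header */
    #ifdef CONFIG_PPC64
    #include <asm/nohash/64/pgtable.h>	/* now calls pte_young() ... */
    #else
    #include <asm/nohash/32/pgtable.h>
    #endif

    /* ... so a pte_young() defined only down here (as before this patch)
     * would come too late for the headers included above. */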
34 changes: 9 additions & 25 deletions arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -133,7 +133,7 @@ extern int icache_44x_need_flush;
 #ifndef __ASSEMBLY__
 
 #define pte_clear(mm, addr, ptep) \
-	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+	do { pte_update(ptep, ~0, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -145,21 +145,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 
 
-/*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_pages(unsigned context, unsigned long va,
-			    unsigned long pmdval, int count);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va,
-			  unsigned long pmdval);
-
-/* Flush an entry from the TLB/hash table */
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
-			     unsigned long address);
-
 /*
  * PTE updates.  This function is called whenever an existing
  * valid PTE is updated.  This does -not- include set_pte_at()
@@ -246,12 +231,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 {
 	unsigned long old;
 	old = pte_update(ptep, _PAGE_ACCESSED, 0);
-#if _PAGE_HASHPTE != 0
-	if (old & _PAGE_HASHPTE) {
-		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(context, addr, ptephys, 1);
-	}
-#endif
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
@@ -261,7 +240,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+	return __pte(pte_update(ptep, ~0, 0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -288,8 +267,13 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 	pte_update(ptep, clr, set);
 }
 
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
 #define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)
 
 /*
  * Note that on Book E processors, the pmd contains the kernel virtual
@@ -330,7 +314,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ * must not include the _PAGE_PRESENT bit.
  * -- paulus
  */
 #define __swp_type(entry)		((entry).val & 0x1f)
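On nohash CPUs the dropped masks were already no-ops: pte-book3e.h (the last file in this commit) defined _PAGE_HASHPTE as 0, so ~_PAGE_HASHPTE equals ~0 and the old pte_same() mask cleared nothing. A minimal standalone check of that identity (not kernel code; _PAGE_HASHPTE is redefined here purely for the demonstration):

    #include <assert.h>

    #define _PAGE_HASHPTE 0UL	/* value from pte-book3e.h before this commit */

    int main(void)
    {
    	/* the clear mask passed to pte_update() is unchanged by the patch */
    	assert(~_PAGE_HASHPTE == ~0UL);
    	return 0;
    }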
21 changes: 7 additions & 14 deletions arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -173,8 +173,6 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
-			    pte_t *ptep, unsigned long pte, int huge);
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
@@ -205,20 +203,20 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 	if (!huge)
 		assert_pte_locked(mm, addr);
 
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (old & _PAGE_HASHPTE)
-		hpte_need_flush(mm, addr, ptep, old, huge);
-#endif
-
 	return old;
 }
 
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
 static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
 
-	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+	if (pte_young(*ptep))
 		return 0;
 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	return (old & _PAGE_ACCESSED) != 0;
@@ -312,7 +310,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 }
 
 #define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -324,11 +322,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 /* Encode and de-code a swap entry */
 #define MAX_SWAPFILES_CHECK() do { \
 	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
-	/* \
-	 * Don't have overlapping bits with _PAGE_HPTEFLAGS \
-	 * We filter HPTEFLAGS on set_pte. \
-	 */ \
-	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
 	} while (0)
 /*
  * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
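For reference, pte_update() in this header atomically clears the bits in its clr argument, sets the bits in set, and returns the previous PTE value, which is why __ptep_test_and_clear_young() above can test _PAGE_ACCESSED in the old value. A simplified single-threaded model (not the kernel code, which uses an atomic ldarx/stdcx. loop and also takes mm, addr, and a huge flag):

    /* simplified model: clear the `clr` bits, set the `set` bits,
     * and return the old value */
    static unsigned long pte_update_model(unsigned long *ptep,
                                          unsigned long clr,
                                          unsigned long set)
    {
    	unsigned long old = *ptep;

    	*ptep = (old & ~clr) | set;
    	return old;
    }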
39 changes: 4 additions & 35 deletions arch/powerpc/include/asm/nohash/pgtable.h
@@ -17,7 +17,6 @@ static inline int pte_write(pte_t pte)
 }
 static inline int pte_read(pte_t pte)		{ return 1; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
@@ -148,53 +147,23 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
-	 * helper pte_update() which does an atomic update. We need to do that
-	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
-	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
-	 * the hash bits instead (ie, same as the non-SMP case)
-	 */
-	if (percpu)
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
-	else
-		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
-	 * with a barrier in between. This is possible because we take care,
-	 * in the hash code, to pre-invalidate if the PTE was already hashed,
-	 * which synchronizes us with any concurrent invalidation.
-	 * In the percpu case, we also fallback to the simple update preserving
-	 * the hash bits
+	 * with a barrier in between.
+	 * In the percpu case, we also fallback to the simple update
 	 */
 	if (percpu) {
-		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		*ptep = pte;
 		return;
 	}
-#if _PAGE_HASHPTE != 0
-	if (pte_val(*ptep) & _PAGE_HASHPTE)
-		flush_hash_entry(mm, ptep, addr);
-#endif
 	__asm__ __volatile__("\
 		stw%U0%X0 %2,%0\n\
 		eieio\n\
 		stw%U0%X0 %L2,%1"
 	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 	: "r" (pte) : "memory");
 
-#elif defined(CONFIG_PPC_STD_MMU_32)
-	/* Third case is 32-bit hash table in UP mode, we need to preserve
-	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
-	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
-	 * and see we need to keep track that this PTE needs invalidating
-	 */
-	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
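The inline asm kept above is the interesting case: with CONFIG_PTE_64BIT on a 32-bit kernel, a PTE is two 32-bit words, and only the ordered pair of stw instructions separated by eieio keeps a concurrent walker from seeing a valid PTE paired with a stale other half. A rough C model of that ordering (not kernel code; it assumes the big-endian layout where the flags word, including the valid/_PAGE_PRESENT bit, is the second word):

    static void set_pte_2x32_model(unsigned long long *ptep,
                                   unsigned long long pte)
    {
    	unsigned int *p = (unsigned int *)ptep;

    	p[0] = (unsigned int)(pte >> 32);	/* address half first */
    	__asm__ __volatile__("eieio" ::: "memory");	/* order the two stores */
    	p[1] = (unsigned int)pte;		/* flags half (valid bit) last */
    }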
1 change: 0 additions & 1 deletion arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -57,7 +57,6 @@
 #define _PAGE_USER	(_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
 #define _PAGE_PRIVILEGED	(_PAGE_BAP_SR)
 
-#define _PAGE_HASHPTE	0
 #define _PAGE_BUSY	0
 
 #define _PAGE_SPECIAL	_PAGE_SW0
