powerpc/mm/radix: Change pte relax sequence to handle nest MMU hang
When relaxing access (a read -> read/write update), the pte needs to be marked invalid
to handle a nest MMU bug. We also need to do a TLB flush after the pte is
marked invalid, before updating the pte with the new access bits.

We also move the TLB flush into the platform-specific __ptep_set_access_flags. This will
help us get rid of unnecessary TLB flushes on BOOK3S 64 later; we don't do that
in this patch. This also helps in avoiding multiple tlbies when a coprocessor is
attached.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
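
For illustration only (not part of the commit), below is a minimal, stand-alone C sketch of the relax sequence the patch adopts for the nest MMU case: mark the pte invalid, flush the TLB while it is invalid, then install the pte with the relaxed access bits. The pte_t type, the _PAGE_* bits, pte_update() and flush_tlb_stub() here are simplified stand-ins rather than the kernel's definitions; only the ordering mirrors the radix__ptep_set_access_flags() change in the diff.

#include <stdio.h>

typedef unsigned long pte_t;

#define _PAGE_PRESENT   0x1UL
#define _PAGE_READ      0x2UL
#define _PAGE_WRITE     0x4UL

/* Clear 'clr' bits, set 'set' bits, and return the old pte value. */
static pte_t pte_update(pte_t *ptep, pte_t clr, pte_t set)
{
        pte_t old = *ptep;

        *ptep = (old & ~clr) | set;
        return old;
}

/* Stand-in for a real per-page TLB flush. */
static void flush_tlb_stub(unsigned long address)
{
        printf("tlb flush for address %#lx\n", address);
}

int main(void)
{
        pte_t pte = _PAGE_PRESENT | _PAGE_READ; /* read-only mapping */
        unsigned long address = 0x10000000UL;

        /* 1. Mark the pte invalid so the nest MMU cannot pick up a stale
         *    translation while access is being relaxed. */
        pte_t old_pte = pte_update(&pte, ~0UL, 0);

        /* 2. Flush the TLB while the pte is invalid. */
        flush_tlb_stub(address);

        /* 3. Install the old pte with the relaxed (write) access bit. */
        pte_update(&pte, 0, old_pte | _PAGE_WRITE);

        printf("final pte: %#lx\n", pte);
        return 0;
}

In the patch itself, steps 1-3 correspond to __radix_pte_update(ptep, ~0, 0), radix__flush_tlb_page_psize(), and __radix_pte_update(ptep, 0, new_pte), followed by a ptesync.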
Aneesh Kumar K.V authored and Michael Ellerman committed Jun 3, 2018
1 parent e4c1112 commit bd5050e
Showing 7 changed files with 17 additions and 7 deletions.
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -245,6 +245,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
         unsigned long clr = ~pte_val(entry) & _PAGE_RO;
 
         pte_update(ptep, clr, set);
+
+        flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -266,6 +266,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
         unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
 
         pte_update(ptep, clr, set);
+
+        flush_tlb_page(vma, address);
 }
 
 static inline int pte_young(pte_t pte)
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -304,6 +304,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
         unsigned long old = pte_val(*ptep);
         *ptep = __pte(old | bits);
 #endif
+
+        flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/pgtable.h
@@ -8,6 +8,7 @@
 #include <asm/processor.h>              /* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 
 struct mm_struct;
 
1 change: 0 additions & 1 deletion arch/powerpc/mm/pgtable-book3s64.c
@@ -52,7 +52,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                  */
                 __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                         pmd_pte(entry), address, MMU_PAGE_2M);
-                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
         }
         return changed;
 }
14 changes: 10 additions & 4 deletions arch/powerpc/mm/pgtable-radix.c
@@ -1091,18 +1091,24 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
         struct mm_struct *mm = vma->vm_mm;
         unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                               _PAGE_RW | _PAGE_EXEC);
-
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+        /*
+         * To avoid NMMU hang while relaxing access, we need mark
+         * the pte invalid in between.
+         */
+        if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+            atomic_read(&mm->context.copros) > 0) {
                 unsigned long old_pte, new_pte;
 
                 old_pte = __radix_pte_update(ptep, ~0, 0);
                 /*
                  * new value of pte
                  */
                 new_pte = old_pte | set;
-                radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+                radix__flush_tlb_page_psize(mm, address, psize);
                 __radix_pte_update(ptep, 0, new_pte);
-        } else
+        } else {
                 __radix_pte_update(ptep, 0, set);
+                radix__flush_tlb_page_psize(mm, address, psize);
+        }
         asm volatile("ptesync" : : : "memory");
 }
2 changes: 0 additions & 2 deletions arch/powerpc/mm/pgtable.c
@@ -224,7 +224,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                 assert_pte_locked(vma->vm_mm, address);
                 __ptep_set_access_flags(vma, ptep, entry,
                                         address, mmu_virtual_psize);
-                flush_tlb_page(vma, address);
         }
         return changed;
 }
@@ -263,7 +262,6 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                 assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
                 __ptep_set_access_flags(vma, ptep, pte, addr, psize);
-                flush_hugetlb_page(vma, addr);
         }
         return changed;
 #endif
