powerpc/book3s64/hash: Use the pte_t address from the caller
Don't fetch the pte value using a lockless page table walk. Instead,
use the value from the caller. hash_preload() is called with the PTL
held, so it is safe to use the pte_t address directly.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-6-aneesh.kumar@linux.ibm.com
Aneesh Kumar K.V authored and Michael Ellerman committed May 5, 2020
1 parent 7900757 commit 2f92447
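
Editor's note: the locking contract described above looks roughly like the
sketch below. Generic mm code takes the PTE page-table lock (PTL) before it
calls update_mmu_cache(), which is what makes it safe for hash_preload() to
dereference the caller-supplied ptep. This is an illustrative sketch of that
pattern (compare the fault paths in mm/memory.c), not code from this commit:
example_fault_path() is a hypothetical name, and error handling is omitted.

#include <linux/mm.h>

/*
 * Hypothetical caller, sketching the contract hash_preload() now relies
 * on: the PTL is taken before update_mmu_cache() runs, so the pte_t it
 * hands down cannot change underneath us.
 */
static void example_fault_path(struct vm_area_struct *vma, pmd_t *pmd,
			       unsigned long address, pte_t entry)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl); /* takes PTL */
	set_pte_at(mm, address, ptep, entry);
	/*
	 * The PTL is still held, so *ptep is stable: hash_preload() can
	 * use the pte_t address directly instead of redoing a lockless
	 * page table walk with interrupts disabled.
	 */
	update_mmu_cache(vma, address, ptep);
	pte_unmap_unlock(ptep, ptl);
}
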
Showing 1 changed file with 5 additions and 22 deletions.
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1546,14 +1546,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 			 bool is_exec, unsigned long trap)
 {
-	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
-	pte_t *ptep;
-	unsigned long flags;
 	int rc, ssize, update_flags = 0;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
 
@@ -1575,30 +1572,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 	vsid = get_user_vsid(&mm->context, ea, ssize);
 	if (!vsid)
 		return;
-	/*
-	 * Hash doesn't like irqs. Walking linux page table with irq disabled
-	 * saves us from holding multiple locks.
-	 */
-	local_irq_save(flags);
 
-	/*
-	 * THP pages use update_mmu_cache_pmd. We don't do
-	 * hash preload there. Hence can ignore THP here
-	 */
-	ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
-	if (!ptep)
-		goto out_exit;
-
-	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
 	 * care of it once we actually try to access the page.
 	 * That way we don't have to duplicate all of the logic for segment
 	 * page size demotion here
+	 * Called with PTL held, hence can be sure the value won't change in
+	 * between.
 	 */
 	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
-		goto out_exit;
+		return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Is that local to this CPU ? */
@@ -1623,8 +1608,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 				   mm_ctx_user_psize(&mm->context),
 				   mm_ctx_user_psize(&mm->context),
 				   pte_val(*ptep));
-out_exit:
-	local_irq_restore(flags);
 }
 
 /*
@@ -1675,7 +1658,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		return;
 	}
 
-	hash_preload(vma->vm_mm, address, is_exec, trap);
+	hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
