Skip to content

Commit

Permalink
powerpc/64s/hash: Simplify slb_flush_and_rebolt()
Browse files Browse the repository at this point in the history
The name slb_flush_and_rebolt() is misleading: it is called in virtual
mode, so it cannot possibly change the stack, and therefore it should not
be touching the shadow area. And since vmalloc is no longer bolted, it
should not change any bolted mappings at all.

Change the name to slb_flush_and_restore_bolted(), and have it just
load the kernel stack SLB entry from what's currently in the shadow SLB area.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
  • Loading branch information
Nicholas Piggin authored and Michael Ellerman committed Oct 14, 2018
1 parent 5434ae7 commit 94ee427
Show file tree
Hide file tree
Showing 5 changed files with 21 additions and 35 deletions.
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/book3s/64/mmu-hash.h
Original file line number Diff line number Diff line change
Expand Up @@ -503,7 +503,7 @@ struct slb_entry {
};

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/swsusp_asm64.S
Original file line number Diff line number Diff line change
Expand Up @@ -262,7 +262,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)

addi r1,r1,-128
#ifdef CONFIG_PPC_BOOK3S_64
bl slb_flush_and_rebolt
bl slb_flush_and_restore_bolted
#endif
bl do_after_copyback
addi r1,r1,128
Expand Down
4 changes: 2 additions & 2 deletions arch/powerpc/mm/hash_utils_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -1125,7 +1125,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {

copy_mm_to_paca(mm);
slb_flush_and_rebolt();
slb_flush_and_restore_bolted();
}
}
#endif /* CONFIG_PPC_64K_PAGES */
Expand Down Expand Up @@ -1197,7 +1197,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
if (user_region) {
if (psize != get_paca_psize(ea)) {
copy_mm_to_paca(mm);
slb_flush_and_rebolt();
slb_flush_and_restore_bolted();
}
} else if (get_paca()->vmalloc_sllp !=
mmu_psize_defs[mmu_vmalloc_psize].sllp) {
Expand Down
46 changes: 16 additions & 30 deletions arch/powerpc/mm/slb.c
Original file line number Diff line number Diff line change
Expand Up @@ -115,8 +115,6 @@ void __slb_restore_bolted_realmode(void)

/*
* Insert the bolted entries into an empty SLB.
* This is not the same as rebolt because the bolted segments are not
* changed, just loaded from the shadow area.
*/
void slb_restore_bolted_realmode(void)
{
Expand All @@ -135,12 +133,15 @@ void slb_flush_all_realmode(void)
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

void slb_flush_and_rebolt(void)
/*
 * This flushes non-bolted entries; it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* and PR KVM appropriately too. */
unsigned long linear_llp, lflags;
unsigned long ksp_esid_data, ksp_vsid_data;
struct slb_shadow *p = get_slb_shadow();

BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

WARN_ON(!irqs_disabled());

Expand All @@ -150,30 +151,12 @@ void slb_flush_and_rebolt(void)
*/
hard_irq_disable();

linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;

ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
ksp_esid_data &= ~SLB_ESID_V;
ksp_vsid_data = 0;
slb_shadow_clear(KSTACK_INDEX);
} else {
/* Update stack entry; others don't change */
slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
ksp_vsid_data =
be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
}

/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
asm volatile("isync\n"
"slbia\n"
/* Slot 1 - kernel stack */
"slbmte %0,%1\n"
"isync"
:: "r"(ksp_vsid_data),
"r"(ksp_esid_data)
"slbmte %0, %1\n"
"isync\n"
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
: "memory");

get_paca()->slb_cache_ptr = 0;
Expand Down Expand Up @@ -254,7 +237,10 @@ void slb_dump_contents(struct slb_entry *slb_ptr)

void slb_vmalloc_update(void)
{
slb_flush_and_rebolt();
/*
 * vmalloc mappings are not bolted, so we only need to flush the
 * non-bolted entries.
 */
slb_flush_and_restore_bolted();
}

static bool preload_hit(struct thread_info *ti, unsigned long esid)
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/mm/slice.c
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ static void slice_flush_segments(void *parm)
copy_mm_to_paca(current->active_mm);

local_irq_save(flags);
slb_flush_and_rebolt();
slb_flush_and_restore_bolted();
local_irq_restore(flags);
#endif
}
Expand Down

0 comments on commit 94ee427

Please sign in to comment.