s390/mm: fix local TLB flushing vs. detach of an mm address space
The local TLB flushing code keeps an additional mask in the mm.context,
the cpu_attach_mask. At the time a global flush of an address space is
done the cpu_attach_mask is copied to the mm_cpumask in order to avoid
future global flushes in case the mm is used by a single CPU only after
the flush.

The trouble is that the reset of the mm_cpumask is racy against the detach
of an mm address space by switch_mm. The current order is first the
global TLB flush and then the copy of the cpu_attach_mask to the
mm_cpumask. The order needs to be the other way around.

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky committed Sep 6, 2017
1 parent 46fde9a, commit b3e5dc4
Showing 2 changed files with 7 additions and 23 deletions.
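
To see the ordering change in isolation before reading the full diff, here is a simplified
before/after sketch of the flush path in __tlb_flush_mm(). This is an illustration distilled
from the patch below, not a verbatim excerpt of the file; it only uses calls that already
appear in the diff.

	/* Old, racy order: global flush first, then the reset of the mask. */
	__tlb_flush_global();
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);

	/* New order: reset the mask first and keep the copy ordered before the flush. */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	__tlb_flush_global();

With the copy done before the flush, the reset of mm_cpumask no longer races with a
concurrent detach of the address space in switch_mm, which is the scenario the commit
message describes.
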
4 changes: 2 additions & 2 deletions arch/s390/include/asm/mmu_context.h
@@ -103,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev == next)
 		return;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	cpumask_set_cpu(cpu, mm_cpumask(next));
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -121,7 +120,7 @@ static inline void finish_arch_post_lock_switch(void)
 		preempt_disable();
 		while (atomic_read(&mm->context.flush_count))
 			cpu_relax();
-
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		if (mm->context.flush_mm)
 			__tlb_flush_mm(mm);
 		preempt_enable();
@@ -136,6 +135,7 @@ static inline void activate_mm(struct mm_struct *prev,
 			       struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 	set_user_asce(next);
 }
 
26 changes: 5 additions & 21 deletions arch/s390/include/asm/tlbflush.h
@@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
  * this implicates multiple ASCEs!).
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-	preempt_disable();
-	atomic_inc(&mm->context.flush_count);
-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		/* Local TLB flush */
-		__tlb_flush_local();
-	} else {
-		/* Global TLB flush */
-		__tlb_flush_global();
-		/* Reset TLB flush mask */
-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-	}
-	atomic_dec(&mm->context.flush_count);
-	preempt_enable();
-}
-
 static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	unsigned long gmap_asce;
@@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
 		if (gmap_asce)
 			__tlb_flush_idte(gmap_asce);
 		__tlb_flush_idte(mm->context.asce);
 	} else {
-		__tlb_flush_full(mm);
+		/* Global TLB flush */
+		__tlb_flush_global();
 	}
-	/* Reset TLB flush mask */
-	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
@@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global()		__tlb_flush_local()
-#define __tlb_flush_full(mm)		__tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
