Skip to content

Commit

Permalink
Merge tag 'asm-generic-mmu-context-5.11' of git://git.kernel.org/pub/…
Browse files Browse the repository at this point in the history
…scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic mmu-context cleanup from Arnd Bergmann:
 "This is a cleanup series from Nicholas Piggin, preparing for later
  changes. The asm/mmu_context.h headers are generalized and common code
  moved to asm-generic/mmu_context.h.

  This saves a bit of code and makes it easier to change in the future"

* tag 'asm-generic-mmu-context-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (25 commits)
  h8300: Fix generic mmu_context build
  m68k: mmu_context: Fix Sun-3 build
  xtensa: use asm-generic/mmu_context.h for no-op implementations
  x86: use asm-generic/mmu_context.h for no-op implementations
  um: use asm-generic/mmu_context.h for no-op implementations
  sparc: use asm-generic/mmu_context.h for no-op implementations
  sh: use asm-generic/mmu_context.h for no-op implementations
  s390: use asm-generic/mmu_context.h for no-op implementations
  riscv: use asm-generic/mmu_context.h for no-op implementations
  powerpc: use asm-generic/mmu_context.h for no-op implementations
  parisc: use asm-generic/mmu_context.h for no-op implementations
  openrisc: use asm-generic/mmu_context.h for no-op implementations
  nios2: use asm-generic/mmu_context.h for no-op implementations
  nds32: use asm-generic/mmu_context.h for no-op implementations
  mips: use asm-generic/mmu_context.h for no-op implementations
  microblaze: use asm-generic/mmu_context.h for no-op implementations
  m68k: use asm-generic/mmu_context.h for no-op implementations
  ia64: use asm-generic/mmu_context.h for no-op implementations
  hexagon: use asm-generic/mmu_context.h for no-op implementations
  csky: use asm-generic/mmu_context.h for no-op implementations
  ...
  • Loading branch information
Linus Torvalds committed Dec 16, 2020
2 parents e2dc495 + c363442 commit 1578071
Show file tree
Hide file tree
Showing 31 changed files with 182 additions and 276 deletions.
12 changes: 4 additions & 8 deletions arch/alpha/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -214,8 +214,6 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
tbiap();
}

#define deactivate_mm(tsk,mm) do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
Expand All @@ -229,6 +227,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
# endif
#endif

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
Expand All @@ -242,19 +241,16 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}

/*
 * Tear down per-mm context state.  Alpha keeps nothing in mm->context
 * that needs freeing, so this is an explicit no-op.
 */
extern inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

/*
 * Entering lazy-TLB mode: point the task's PCB page-table base (ptbr)
 * at this mm's pgd.  The pgd is a kernel virtual address; subtracting
 * IDENT_ADDR converts it to a physical address and the PAGE_SHIFT
 * shift turns that into a page frame number, which is the form the
 * pcb.ptbr field holds.
 */
#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	task_thread_info(tsk)->pcb.ptbr
	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#include <asm-generic/mmu_context.h>

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
Expand Down
17 changes: 9 additions & 8 deletions arch/arc/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
* Initialize the context related info for a new mm_struct
* instance.
*/
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
Expand All @@ -113,6 +114,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
unsigned long flags;
Expand Down Expand Up @@ -153,13 +155,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}

/*
* Called at the time of execve() to get a new ASID
* Note the subtlety here: get_new_mmu_context() behaves differently here
* vs. in switch_mm(). Here it always returns a new ASID, because mm has
* an unallocated "initial" value, while in latter, it moves to a new ASID,
* only if it was unallocated
* activate_mm defaults (in asm-generic) to switch_mm and is called at the
* time of execve() to get a new ASID Note the subtlety here:
* get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
* it always returns a new ASID, because mm has an unallocated "initial"
* value, while in latter, it moves to a new ASID, only if it was
* unallocated
*/
#define activate_mm(prev, next) switch_mm(prev, next, NULL)

/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
Expand All @@ -168,8 +170,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* there is a good chance that task gets sched-out/in, making it's ASID valid
* again (this teased me for a whole day).
*/
#define deactivate_mm(tsk, mm) do { } while (0)

#define enter_lazy_tlb(mm, tsk)
#include <asm-generic/mmu_context.h>

#endif /* __ASM_ARC_MMU_CONTEXT_H */
26 changes: 3 additions & 23 deletions arch/arm/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
Expand Down Expand Up @@ -92,32 +94,10 @@ static inline void finish_arch_post_lock_switch(void)

#endif /* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
return 0;
}


#endif /* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)

/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
* mm: describes the currently active mm context
* tsk: task which is entering lazy tlb
* cpu: cpu number which is entering lazy tlb
*
* tsk->mm will be NULL
*/
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
Expand Down Expand Up @@ -149,6 +129,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}

#define deactivate_mm(tsk,mm) do { } while (0)
#include <asm-generic/mmu_context.h>

#endif
8 changes: 4 additions & 4 deletions arch/arm64/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -174,9 +174,9 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
* Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
* take CPU migration into account.
*/
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
Expand Down Expand Up @@ -208,6 +208,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
Expand Down Expand Up @@ -248,15 +249,14 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */
6 changes: 6 additions & 0 deletions arch/c6x/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#ifndef _ASM_C6X_MMU_CONTEXT_H
#define _ASM_C6X_MMU_CONTEXT_H

/* C6x has no MMU-specific context ops; use the generic no-MMU stubs. */
#include <asm-generic/nommu_context.h>

#endif /* _ASM_C6X_MMU_CONTEXT_H */
8 changes: 3 additions & 5 deletions arch/csky/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,6 @@
#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)

#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
#define activate_mm(prev,next) switch_mm(prev, next, current)

#define destroy_context(mm) do {} while (0)
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#define deactivate_mm(tsk, mm) do {} while (0)

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

Expand All @@ -46,4 +41,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,

flush_icache_deferred(next);
}

#include <asm-generic/mmu_context.h>

#endif /* __ASM_CSKY_MMU_CONTEXT_H */
6 changes: 6 additions & 0 deletions arch/h8300/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#ifndef _ASM_H8300_MMU_CONTEXT_H
#define _ASM_H8300_MMU_CONTEXT_H

/* h8300 has no MMU-specific context ops; use the generic no-MMU stubs. */
#include <asm-generic/nommu_context.h>

#endif /* _ASM_H8300_MMU_CONTEXT_H */
33 changes: 5 additions & 28 deletions arch/hexagon/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,39 +15,13 @@
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>

static inline void destroy_context(struct mm_struct *mm)
{
}

/*
* VM port hides all TLB management, so "lazy TLB" isn't very
 * meaningful. Even for ports to architectures with visible TLBs,
* this is almost invariably a null function.
*
* mm->context is set up by pgd_alloc, so no init_new_context required.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}

/*
* Architecture-specific actions, if any, for memory map deactivation.
*/
static inline void deactivate_mm(struct task_struct *tsk,
struct mm_struct *mm)
{
}

/**
* init_new_context - initialize context related info for new mm_struct instance
* @tsk: pointer to a task struct
* @mm: pointer to a new mm struct
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
/* mm->context is set up by pgd_alloc */
return 0;
}

/*
* Switch active mm context
Expand All @@ -74,6 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/*
* Activate new memory map for task
*/
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
Expand All @@ -86,4 +61,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>

#include <asm-generic/mmu_context.h>

#endif
17 changes: 4 additions & 13 deletions arch/ia64/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,6 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
* When the context counter wraps around all TLBs need to be flushed because
* an old context number might have been reused. This is signalled by the
Expand Down Expand Up @@ -116,19 +111,14 @@ get_mmu_context (struct mm_struct *mm)
* Initialize context number to some sane value. MM is guaranteed to be a
* brand-new address-space, so no TLB flushing is needed, ever.
*/
/*
 * The self-referential #define marks this as an arch override, presumably
 * so the later #include of asm-generic/mmu_context.h does not install its
 * fallback (NOTE(review): verify against the generic header's #ifndef
 * guards).  Zeroing mm->context leaves the context number unallocated
 * until first use.
 */
#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
Expand Down Expand Up @@ -178,11 +168,10 @@ activate_context (struct mm_struct *mm)
} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm) do { } while (0)

/*
* Switch from address space PREV to address space NEXT.
*/
#define activate_mm activate_mm
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
Expand All @@ -196,5 +185,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)

#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)

#include <asm-generic/mmu_context.h>

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
Loading

0 comments on commit 1578071

Please sign in to comment.