asm-generic/tlb: Remove arch_tlb*_mmu()
Now that all architectures are converted to the generic code, remove
the arch hooks.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
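For orientation, here is a minimal sketch (not part of this commit) of how a caller typically drives the now fully generic mmu_gather API whose internals this patch consolidates; example_unmap() is a hypothetical helper and the actual page-table walk is elided:

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical caller: tear down mappings in [start, end) of an mm. */
static void example_unmap(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* init; no arch hook anymore */
	/* ... walk the page tables, calling tlb_remove_page(&tlb, page) ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free batches */
}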
Peter Zijlstra authored and Ingo Molnar committed Apr 3, 2019
1 parent 9de7d83 commit 1808d65
mm/mmu_gather.c: 42 additions & 51 deletions
@@ -93,33 +93,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm = !(start | (end+1));
-
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb->need_flush_all = 0;
-	tlb->local.next = NULL;
-	tlb->local.nr = 0;
-	tlb->local.max = ARRAY_SIZE(tlb->__pages);
-	tlb->active = &tlb->local;
-	tlb->batch_count = 0;
-#endif
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	tlb->page_size = 0;
-#endif
-
-	__tlb_reset_range(tlb);
-}
-
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -136,27 +109,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		__tlb_reset_range(tlb);
-		__tlb_adjust_range(tlb, start, end - start);
-	}
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb_batch_list_free(tlb);
-#endif
-}
-
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -258,10 +210,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
-	arch_tlb_gather_mmu(tlb, mm, start, end);
+	tlb->mm = mm;
+
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb->need_flush_all = 0;
+	tlb->local.next = NULL;
+	tlb->local.nr = 0;
+	tlb->local.max = ARRAY_SIZE(tlb->__pages);
+	tlb->active = &tlb->local;
+	tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	tlb->page_size = 0;
+#endif
+
+	__tlb_reset_range(tlb);
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
 void tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end)
 {
@@ -272,8 +254,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
 	 * forcefully if we detect parallel PTE batching threads.
 	 */
-	bool force = mm_tlb_flush_nested(tlb->mm);
+	if (mm_tlb_flush_nested(tlb->mm)) {
+		__tlb_reset_range(tlb);
+		__tlb_adjust_range(tlb, start, end - start);
+	}
 
-	arch_tlb_finish_mmu(tlb, start, end, force);
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
+#endif
 	dec_tlb_flush_pending(tlb->mm);
 }
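A side note on the fullmm test carried over verbatim above: !(start | (end+1)) is non-zero exactly when start == 0 and end == ~0UL, i.e. the gather spans the whole address space (end + 1 wraps to 0 in unsigned arithmetic). A standalone sketch, with hypothetical names, that checks this:

#include <assert.h>

/* Hypothetical standalone re-statement of the kernel's fullmm test. */
static int is_fullmm(unsigned long start, unsigned long end)
{
	/* Zero iff start == 0 and end == ~0UL, since end + 1 wraps to 0. */
	return !(start | (end + 1));
}

int main(void)
{
	assert(is_fullmm(0UL, ~0UL));		/* whole address space */
	assert(!is_fullmm(0UL, 0x7fffffffUL));	/* bounded range */
	assert(!is_fullmm(0x1000UL, ~0UL));	/* non-zero start */
	return 0;
}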

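The force-flush path added to tlb_finish_mmu() keys off mm_tlb_flush_nested(), which the inc_tlb_flush_pending()/dec_tlb_flush_pending() calls in the diff keep accurate. A rough userspace sketch of that counting scheme (demo types and names, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

/* Demo stand-in for mm_struct's tlb_flush_pending counter. */
struct mm_demo {
	atomic_int tlb_flush_pending;
};

/* Taken at tlb_gather_mmu() time in the kernel. */
static void inc_tlb_flush_pending_demo(struct mm_demo *mm)
{
	atomic_fetch_add(&mm->tlb_flush_pending, 1);
}

/* Dropped at the end of tlb_finish_mmu(). */
static void dec_tlb_flush_pending_demo(struct mm_demo *mm)
{
	atomic_fetch_sub(&mm->tlb_flush_pending, 1);
}

/* More than one pending gather on this mm means a parallel batcher,
 * so tlb_finish_mmu() widens the range and flushes unconditionally. */
static bool mm_tlb_flush_nested_demo(struct mm_demo *mm)
{
	return atomic_load(&mm->tlb_flush_pending) > 1;
}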