x86/mm: Consolidate full flush threshold decision
Reduce code duplication by consolidating the decision point for whether to do
individual invalidations or a full flush inside get_flush_tlb_info().

Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Link: https://lore.kernel.org/r/20250226030129.530345-2-riel@surriel.com
Rik van Riel authored and Ingo Molnar committed Mar 19, 2025
commit 4a02ed8 (parent: 631ca89)
Showing 1 changed file with 19 additions and 22 deletions.
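To see what the consolidated threshold does in practice: with the default tlb_single_page_flush_ceiling of 33 on x86 (tunable at runtime via /sys/kernel/debug/x86/tlb_single_page_flush_ceiling), a 4 KiB-stride flush of 33 pages is still performed page by page, while 34 pages trips the check in get_flush_tlb_info() below and is promoted to a full flush. A minimal user-space sketch of the decision rule (wants_full_flush() is an illustrative helper, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Default x86 ceiling; stated here as an assumption, tunable via debugfs. */
static unsigned long tlb_single_page_flush_ceiling = 33;

/*
 * Mirrors the consolidated check in get_flush_tlb_info(): once a range
 * spans more pages than the ceiling, a full flush is considered cheaper
 * than invalidating each page individually.
 */
static bool wants_full_flush(unsigned long start, unsigned long end,
			     unsigned int stride_shift)
{
	return ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling;
}

int main(void)
{
	printf("%d\n", wants_full_flush(0, 33UL << 12, 12));	/* 0: at the ceiling */
	printf("%d\n", wants_full_flush(0, 34UL << 12, 12));	/* 1: promoted to full flush */
	return 0;
}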
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1000,6 +1000,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
 #endif
 
+	/*
+	 * If the number of flushes is so large that a full flush
+	 * would be faster, do a full flush.
+	 */
+	if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
+		start = 0;
+		end = TLB_FLUSH_ALL;
+	}
+
 	info->start		= start;
 	info->end		= end;
 	info->mm		= mm;
@@ -1026,17 +1035,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				bool freed_tables)
 {
 	struct flush_tlb_info *info;
+	int cpu = get_cpu();
 	u64 new_tlb_gen;
-	int cpu;
-
-	cpu = get_cpu();
-
-	/* Should we flush just the requested range? */
-	if ((end == TLB_FLUSH_ALL) ||
-	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
-		start = 0;
-		end = TLB_FLUSH_ALL;
-	}
 
 	/* This is also a barrier that synchronizes with switch_mm(). */
 	new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1089,22 +1089,19 @@ static void do_kernel_range_flush(void *info)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	/* Balance as user space task's flush, a bit conservative */
-	if (end == TLB_FLUSH_ALL ||
-	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
-		on_each_cpu(do_flush_tlb_all, NULL, 1);
-	} else {
-		struct flush_tlb_info *info;
+	struct flush_tlb_info *info;
+
+	guard(preempt)();
 
-		preempt_disable();
-		info = get_flush_tlb_info(NULL, start, end, 0, false,
-					  TLB_GENERATION_INVALID);
+	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+				  TLB_GENERATION_INVALID);
 
-		on_each_cpu(do_kernel_range_flush, info, 1);
+	if (info->end == TLB_FLUSH_ALL)
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else
+		on_each_cpu(do_kernel_range_flush, info, 1);
 
-		put_flush_tlb_info();
-		preempt_enable();
-	}
+	put_flush_tlb_info();
 }
 
 /*
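Beyond the consolidation itself, the rewritten flush_tlb_kernel_range() now passes PAGE_SHIFT instead of 0 as the stride shift, so the page-count comparison in get_flush_tlb_info() is meaningful for kernel ranges, and it checks info->end against TLB_FLUSH_ALL afterwards to choose between do_flush_tlb_all() and do_kernel_range_flush(). It also swaps the explicit preempt_disable()/preempt_enable() pair for guard(preempt)() from <linux/cleanup.h>, which re-enables preemption automatically when the scope is left. A rough user-space analogue of that scope-guard pattern, built on the GCC/Clang cleanup attribute (all names below are illustrative stand-ins, not kernel API):

#include <stdio.h>

/* Stand-ins for preempt_disable()/preempt_enable(). */
static void fake_preempt_disable(void) { puts("preemption off"); }
static void fake_preempt_enable(void)  { puts("preemption back on"); }

/* Called automatically when the guarded variable goes out of scope. */
static void preempt_guard_exit(int *unused)
{
	(void)unused;
	fake_preempt_enable();
}

/* Declares a scope-local guard: disable now, re-enable at scope exit. */
#define preempt_guard() \
	__attribute__((cleanup(preempt_guard_exit))) int guard_ = (fake_preempt_disable(), 0)

int main(void)
{
	preempt_guard();	/* plays the role of guard(preempt)() in the patch */
	puts("flush work runs with the guard held");
	return 0;		/* guard_ leaves scope here, re-enabling "preemption" */
}

The kernel's guard() behaves the same way: the cleanup runs on every return path, so an early exit cannot leak a disabled-preemption section.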
