Skip to content

Commit

Permalink
x86: optimize clflush
Browse files Browse the repository at this point in the history
It is sufficient to issue clflush on a single CPU: the cache-line
invalidation is broadcast throughout the coherence domain.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Thomas Gleixner authored and Ingo Molnar committed Jan 30, 2008
1 parent cd8ddf1 commit 3b233e5
Showing 1 changed file with 8 additions and 14 deletions.
22 changes: 8 additions & 14 deletions arch/x86/mm/pageattr.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,35 +64,29 @@ static void cpa_flush_all(void)
on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

/*
 * Argument marshalling for the on_each_cpu() cache-flush callback:
 * the virtual range whose cache lines must be flushed.
 * NOTE(review): this commit removes the struct entirely — clflush is
 * MESI-coherent, so the caller now issues it once instead of per-CPU.
 */
struct clflush_data {
unsigned long addr;
int numpages;
};

/*
 * Per-CPU IPI callback for cpa_flush_range(): flush the local TLB only.
 * The cache flush (clflush) is issued once by the caller, since clflush
 * invalidations are broadcast throughout the coherence domain and need
 * not be repeated on every CPU.
 *
 * @arg: unused (passed as NULL by cpa_flush_range()).
 */
static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

/*
 * cpa_flush_range - flush TLBs on all CPUs and the cache lines covering
 * [addr, addr + numpages * PAGE_SIZE) after a page attribute change.
 *
 * @addr:     virtual start address of the changed range
 * @numpages: number of pages in the range
 *
 * Must be called with interrupts enabled: on_each_cpu() sends IPIs.
 */
static void cpa_flush_range(unsigned long addr, int numpages)
{
	BUG_ON(irqs_disabled());

	/* The TLB flush still has to run on every CPU. */
	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
}

/*
Expand Down

0 comments on commit 3b233e5

Please sign in to comment.