From ae83a3db89d41bc8769ec902dc17610841435521 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 30 Jan 2008 13:34:08 +0100
Subject: [PATCH]

--- yaml ---
r: 80540
b: refs/heads/master
c: 57a6a46aa26d6e39c62daf8b3b96e94f76e6846f
h: refs/heads/master
v: v3
---
 [refs]                       |  2 +-
 trunk/arch/x86/mm/pageattr.c | 42 +++++++++++++++++++++++++++++++++++-
 2 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 113f25f52c5b..af42b2cc02eb 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 56744546b3e5379177a70e7306c6283f727e4732
+refs/heads/master: 57a6a46aa26d6e39c62daf8b3b96e94f76e6846f
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index 7823adab96e4..bbe691dd272e 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -52,6 +52,37 @@ static void global_flush_tlb(void)
 	on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 
+struct clflush_data {
+	unsigned long addr;
+	int numpages;
+};
+
+static void __cpa_flush_range(void *arg)
+{
+	struct clflush_data *cld = arg;
+
+	/*
+	 * We could optimize that further and do individual per page
+	 * tlb invalidates for a low number of pages. Caveat: we must
+	 * flush the high aliases on 64bit as well.
+	 */
+	__flush_tlb_all();
+
+	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
+}
+
+static void cpa_flush_range(unsigned long addr, int numpages)
+{
+	struct clflush_data cld;
+
+	BUG_ON(irqs_disabled());
+
+	cld.addr = addr;
+	cld.numpages = numpages;
+
+	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
+}
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -316,7 +347,16 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
 					     mask_clr);
 
-	global_flush_tlb();
+	/*
+	 * On success we use clflush, when the CPU supports it, to
+	 * avoid the wbinvd. If the CPU does not support it, and in
+	 * the error case, we fall back to global_flush_tlb (which
+	 * uses wbinvd):
+	 */
+	if (!ret && cpu_has_clflush)
+		cpa_flush_range(addr, numpages);
+	else
+		global_flush_tlb();
 
 	return ret;
 }
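
The win here comes from the granularity of the two instructions: wbinvd
writes back and invalidates a CPU's entire cache hierarchy, which can stall
the machine for a long time, while clflush evicts a single cache line, so a
range flush only touches the lines that actually changed. Because clflush
works one line at a time, a helper like clflush_cache_range() has to walk
the range in cache-line strides. The fragment below is a minimal sketch of
that loop, not the kernel's implementation: the fixed 64-byte line size,
the helper name clflush_range_sketch, and the explicit mfence pairing are
assumptions for illustration; the kernel derives the real line size from
CPUID and supplies its own barriers.

	#include <stddef.h>
	#include <stdint.h>

	/* Assumed line size; the kernel reads the real value from CPUID. */
	#define CACHE_LINE_SIZE	64

	/*
	 * Hypothetical sketch of a clflush-based range flush: align down
	 * to a cache-line boundary, then flush every line covering
	 * [vaddr, vaddr + size). The mfences order the flushes against
	 * surrounding loads and stores, similar in spirit to the
	 * barriers around the kernel's clflush loop.
	 */
	static void clflush_range_sketch(void *vaddr, size_t size)
	{
		uintptr_t p   = (uintptr_t) vaddr & ~(uintptr_t) (CACHE_LINE_SIZE - 1);
		uintptr_t end = (uintptr_t) vaddr + size;

		__asm__ volatile("mfence" ::: "memory");
		for (; p < end; p += CACHE_LINE_SIZE)
			__asm__ volatile("clflush (%0)" :: "r" (p) : "memory");
		__asm__ volatile("mfence" ::: "memory");
	}

The on_each_cpu() pattern in the patch is also worth noting: the cross-call
handler receives a single void * argument, so cpa_flush_range() bundles
addr and numpages into a stack-allocated struct clflush_data and passes its
address. That is safe because the final argument of this kernel's
four-argument on_each_cpu() (wait = 1) makes the call synchronous, so the
stack slot is still live while every CPU runs the handler; the
BUG_ON(irqs_disabled()) guards against issuing the cross-call from a
context where waiting on other CPUs could deadlock.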