diff --git a/[refs] b/[refs]
index 760a053227e5..0cbf2c124468 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f5a50ce1bf53a07cb7d0bab1a87e62cc4f34f0ab
+refs/heads/master: ace63e3743ae59fc0cce48450bd2e410776b4148
diff --git a/trunk/arch/x86/mm/pageattr_64.c b/trunk/arch/x86/mm/pageattr_64.c
index 4053832d4108..e1c860800ff1 100644
--- a/trunk/arch/x86/mm/pageattr_64.c
+++ b/trunk/arch/x86/mm/pageattr_64.c
@@ -260,3 +260,33 @@ void global_flush_tlb(void)
 	on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 EXPORT_SYMBOL(global_flush_tlb);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (PageHighMem(page))
+		return;
+	if (!enable) {
+		debug_check_no_locks_freed(page_address(page),
+						numpages * PAGE_SIZE);
+	}
+
+	/*
+	 * If page allocator is not up yet then do not call c_p_a():
+	 */
+	if (!debug_pagealloc_enabled)
+		return;
+
+	/*
+	 * the return value is ignored - the calls cannot fail,
+	 * large pages are disabled at boot time.
+	 */
+	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+
+	/*
+	 * we should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu.
+	 */
+	__flush_tlb_all();
+}
+#endif
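
The hook above is what CONFIG_DEBUG_PAGEALLOC uses to remove the kernel mapping of pages as the allocator frees them, so any stray access to freed memory faults immediately instead of silently corrupting data. As a rough illustration of the same technique, here is a minimal standalone userspace sketch (an analogy, not kernel code, and not the patch's own API) that "frees" a page by revoking access with mprotect(), so a use-after-free turns into an immediate SIGSEGV:

/*
 * Illustrative userspace analogy of the DEBUG_PAGEALLOC idea: instead of
 * reusing a "freed" page, revoke all access to it so a stray late access
 * faults right away.  All names here are for demonstration only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

	/* "Allocate" one page, analogous to the page allocator handing one out. */
	char *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(page, 0xaa, page_size);

	/*
	 * "Free" the page by revoking all access, the way
	 * kernel_map_pages(page, n, 0) clears the kernel mapping
	 * under CONFIG_DEBUG_PAGEALLOC.
	 */
	if (mprotect(page, page_size, PROT_NONE) != 0) {
		perror("mprotect");
		return 1;
	}
	printf("page protected; a use-after-free would now fault\n");

	/* page[0] = 1;  <-- uncommenting this stray write raises SIGSEGV */

	munmap(page, page_size);
	return 0;
}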