x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
No behavior change.

Move some of the vmalloc_sync_all() code into a new function,
sync_global_pgds(), that will be useful for memory hotplug.

Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
LKML-Reference: <4C6E4ECD.1090607@linux.intel.com>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Haicheng Li authored and H. Peter Anvin committed Aug 26, 2010
1 parent 61c7732 commit 6afb515
Showing 3 changed files with 33 additions and 23 deletions.
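
The commit message says the new helper will be useful for memory hotplug. As a rough sketch of how such a caller might look (the caller below is hypothetical, assumes kernel context, and is not part of this commit), a hot-add path that has just populated new kernel page-table entries in init_mm would hand the affected range to the helper:

/* Hypothetical caller -- not part of this commit. Once page-table
 * entries for a hot-added range have been created in init_mm, any
 * new PGD entry must be propagated to every per-process PGD. */
static void memory_hotadd_sync(unsigned long start, unsigned long end)
{
	/* Round down so the PGD slot containing 'start' is included. */
	sync_global_pgds(start & PGDIR_MASK, end);
}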
2 changes: 2 additions & 0 deletions arch/x86/include/asm/pgtable_64.h
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }
 
+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
24 changes: 1 addition & 23 deletions arch/x86/mm/fault.c
@@ -326,29 +326,7 @@ static void dump_pagetable(unsigned long address)
 
 void vmalloc_sync_all(void)
 {
-	unsigned long address;
-
-	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-	     address += PGDIR_SIZE) {
-
-		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
-		struct page *page;
-
-		if (pgd_none(*pgd_ref))
-			continue;
-
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
-				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
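
The only subtlety in the new one-line body is the rounding: VMALLOC_START & PGDIR_MASK clears the low bits so the loop in sync_global_pgds() begins at the PGD boundary covering VMALLOC_START. A self-contained user-space sketch of that arithmetic, with the x86_64 4-level-paging constants hard-coded as assumptions rather than taken from kernel headers:

#include <stdio.h>

/* Assumed x86_64 4-level paging constants: each PGD entry maps
 * 1UL << 39 bytes = 512 GiB. */
#define PGDIR_SHIFT	39
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

int main(void)
{
	/* Example vmalloc-area addresses (illustrative values only). */
	unsigned long addrs[] = { 0xffffc90000000000UL,
				  0xffffc90012345000UL };
	int i;

	for (i = 0; i < 2; i++)
		printf("%#lx rounds down to PGD boundary %#lx\n",
		       addrs[i], addrs[i] & PGDIR_MASK);
	return 0;
}

Both example addresses round down to the same 512 GiB slot, so a single iteration of the loop covers them.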
30 changes: 30 additions & 0 deletions arch/x86/mm/init_64.c
@@ -97,6 +97,36 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
+/*
+ * When memory is added or removed, make sure all process MMs have
+ * suitable PGD entries in the local PGD-level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	unsigned long address;
+
+	for (address = start; address <= end; address += PGDIR_SIZE) {
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd)
+				       != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
+	}
+}
+
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
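
The heart of the loop is the slot arithmetic: pgd_index() picks which of the 512 top-level entries covers an address, and that same slot is then patched in every per-process PGD page on pgd_list while pgd_lock is held. A standalone sketch of the index computation (constants again assumed for x86_64 4-level paging, not taken from kernel headers):

#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

/* Mirrors the kernel's pgd_index(): the PGD slot number for a
 * virtual address is bits [47:39] of that address. */
static unsigned long pgd_index(unsigned long address)
{
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* In the loop body, each process PGD page is indexed at this
	 * slot: pgd = (pgd_t *)page_address(page) + pgd_index(address); */
	printf("slot for vmalloc base: %lu\n",
	       pgd_index(0xffffc90000000000UL));
	return 0;
}

The BUG_ON() arm documents the invariant the sync relies on: once a slot is populated, every PGD in the system must point at the same lower-level page table.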
