Commit ead3e9f
---
r: 99945
b: refs/heads/master
c: 67350a5
h: refs/heads/master
i:
  99943: 00d5bba
v: v3
Jeremy Fitzhardinge authored and Ingo Molnar committed Jul 8, 2008
1 parent 0f6a8da commit ead3e9f
Showing 2 changed files with 27 additions and 52 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 15878c0b21b7b04a08108e9027ebbbd68a2502e0
+refs/heads/master: 67350a5c4514c280665cdb45439d32a008a264ba
77 changes: 26 additions & 51 deletions trunk/arch/x86/mm/fault.c
@@ -903,71 +903,46 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-        /*
-         * Note that races in the updates of insync and start aren't
-         * problematic: insync can only get set bits added, and updates to
-         * start are only improving performance (without affecting correctness
-         * if undone).
-         */
-        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-        static unsigned long start = TASK_SIZE;
+        unsigned long start = VMALLOC_START & PGDIR_MASK;
         unsigned long address;
 
         if (SHARED_KERNEL_PMD)
                 return;
 
         BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
         for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-                if (!test_bit(pgd_index(address), insync)) {
-                        unsigned long flags;
-                        struct page *page;
-
-                        spin_lock_irqsave(&pgd_lock, flags);
-                        list_for_each_entry(page, &pgd_list, lru) {
-                                if (!vmalloc_sync_one(page_address(page),
-                                                      address))
-                                        break;
-                        }
-                        spin_unlock_irqrestore(&pgd_lock, flags);
-                        if (!page)
-                                set_bit(pgd_index(address), insync);
+                unsigned long flags;
+                struct page *page;
+
+                spin_lock_irqsave(&pgd_lock, flags);
+                list_for_each_entry(page, &pgd_list, lru) {
+                        if (!vmalloc_sync_one(page_address(page),
+                                              address))
+                                break;
                 }
-                if (address == start && test_bit(pgd_index(address), insync))
-                        start = address + PGDIR_SIZE;
+                spin_unlock_irqrestore(&pgd_lock, flags);
         }
 #else /* CONFIG_X86_64 */
-        /*
-         * Note that races in the updates of insync and start aren't
-         * problematic: insync can only get set bits added, and updates to
-         * start are only improving performance (without affecting correctness
-         * if undone).
-         */
-        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-        static unsigned long start = VMALLOC_START & PGDIR_MASK;
+        unsigned long start = VMALLOC_START & PGDIR_MASK;
         unsigned long address;
 
         for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-                if (!test_bit(pgd_index(address), insync)) {
-                        const pgd_t *pgd_ref = pgd_offset_k(address);
-                        unsigned long flags;
-                        struct page *page;
-
-                        if (pgd_none(*pgd_ref))
-                                continue;
-                        spin_lock_irqsave(&pgd_lock, flags);
-                        list_for_each_entry(page, &pgd_list, lru) {
-                                pgd_t *pgd;
-                                pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                                if (pgd_none(*pgd))
-                                        set_pgd(pgd, *pgd_ref);
-                                else
-                                        BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-                        }
-                        spin_unlock_irqrestore(&pgd_lock, flags);
-                        set_bit(pgd_index(address), insync);
+                const pgd_t *pgd_ref = pgd_offset_k(address);
+                unsigned long flags;
+                struct page *page;
+
+                if (pgd_none(*pgd_ref))
+                        continue;
+                spin_lock_irqsave(&pgd_lock, flags);
+                list_for_each_entry(page, &pgd_list, lru) {
+                        pgd_t *pgd;
+                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                        if (pgd_none(*pgd))
+                                set_pgd(pgd, *pgd_ref);
+                        else
+                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                 }
-                if (address == start)
-                        start = address + PGDIR_SIZE;
+                spin_unlock_irqrestore(&pgd_lock, flags);
         }
 #endif
 }
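
This commit (apparently a history-mirror entry, given the [refs] file and the yaml metadata above) carries no prose message, so the intent has to be read from the diff itself: both the 32-bit and 64-bit branches of vmalloc_sync_all() drop their static insync bitmap and start hint, so every call now walks the whole vmalloc range under pgd_lock instead of skipping ranges an earlier call already marked as synced. The removed comment argues the races on that cache were harmless (insync bits were only ever added), which suggests the bookkeeping was simply not worth its complexity. As a rough illustration only, a userspace model of the before/after pattern, with entirely hypothetical names and not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define NR_RANGES 8                     /* stand-in for PTRS_PER_PGD */

/* Stand-in for vmalloc_sync_one(): pretend the range synced fine. */
static bool sync_one(int idx)
{
        printf("syncing range %d\n", idx);
        return true;
}

/* Old scheme: cache what is already in sync and skip it on later calls. */
static void sync_all_cached(void)
{
        static bool insync[NR_RANGES];  /* models the insync bitmap */
        static int start;               /* models the start hint */
        int idx;

        for (idx = start; idx < NR_RANGES; idx++) {
                if (!insync[idx] && sync_one(idx))
                        insync[idx] = true;
                if (idx == start && insync[idx])
                        start = idx + 1;        /* skip the synced prefix next time */
        }
}

/* New scheme: no cache, just do the full walk every time. */
static void sync_all_simple(void)
{
        int idx;

        for (idx = 0; idx < NR_RANGES; idx++)
                sync_one(idx);
}

int main(void)
{
        sync_all_cached();      /* a second call would skip every range */
        sync_all_simple();      /* always pays for the full walk */
        return 0;
}

Either variant ends in the same synced state; the cached one merely remembers progress across calls, at the cost of exactly the extra state this patch deletes.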
