Commit 38837b1

---
yaml
---
r: 86333
b: refs/heads/master
c: 92cb54a
h: refs/heads/master
i:
  86331: 3b4f54b
v: v3
Ingo Molnar committed Feb 26, 2008
1 parent b41a51a commit 38837b1
Showing 2 changed files with 52 additions and 34 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1ce70c4fac3c3954bd48c035f448793867592bc0
+refs/heads/master: 92cb54a37a42a41cfb2ef7f1478bfa4395198258
trunk/arch/x86/mm/pageattr.c: 84 changes (51 additions, 33 deletions)
@@ -44,6 +44,12 @@ static inline unsigned long highmap_end_pfn(void)
 
 #endif
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+# define debug_pagealloc 1
+#else
+# define debug_pagealloc 0
+#endif
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -355,45 +361,48 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 static LIST_HEAD(page_pool);
 static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed, pool_refill;
+static unsigned long pool_used, pool_failed;
 
-static void cpa_fill_pool(void)
+static void cpa_fill_pool(struct page **ret)
 {
-	struct page *p;
 	gfp_t gfp = GFP_KERNEL;
+	unsigned long flags;
+	struct page *p;
 
-	/* Do not allocate from interrupt context */
-	if (in_irq() || irqs_disabled())
-		return;
 	/*
-	 * Check unlocked. I does not matter when we have one more
-	 * page in the pool. The bit lock avoids recursive pool
-	 * allocations:
+	 * Avoid recursion (on debug-pagealloc) and also signal
+	 * our priority to get to these pagetables:
 	 */
-	if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
+	if (current->flags & PF_MEMALLOC)
 		return;
+	current->flags |= PF_MEMALLOC;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
-	 * We could do:
-	 * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-	 * but this fails on !PREEMPT kernels
+	 * Allocate atomically from atomic contexts:
 	 */
-	gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-#endif
+	if (in_atomic() || irqs_disabled() || debug_pagealloc)
+		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 
-	while (pool_pages < pool_size) {
+	while (pool_pages < pool_size || (ret && !*ret)) {
 		p = alloc_pages(gfp, 0);
 		if (!p) {
 			pool_failed++;
 			break;
 		}
-		spin_lock_irq(&pgd_lock);
+		/*
+		 * If the call site needs a page right now, provide it:
+		 */
+		if (ret && !*ret) {
+			*ret = p;
+			continue;
+		}
+		spin_lock_irqsave(&pgd_lock, flags);
 		list_add(&p->lru, &page_pool);
 		pool_pages++;
-		spin_unlock_irq(&pgd_lock);
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
-	clear_bit_unlock(0, &pool_refill);
+
+	current->flags &= ~PF_MEMALLOC;
 }
 
 #define SHIFT_MB (20 - PAGE_SHIFT)
@@ -414,11 +423,15 @@ void __init cpa_init(void)
	 * GiB. Shift MiB to Gib and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
-	gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-	pool_size = POOL_PAGES_PER_GB * gb;
+	if (debug_pagealloc) {
+		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
+		pool_size = POOL_PAGES_PER_GB * gb;
+	} else {
+		pool_size = 1;
+	}
 	pool_low = pool_size;
 
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
 	printk(KERN_DEBUG
 	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
 	       pool_pages, pool_size);
@@ -440,16 +453,20 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	spin_lock_irqsave(&pgd_lock, flags);
 	if (list_empty(&page_pool)) {
 		spin_unlock_irqrestore(&pgd_lock, flags);
-		return -ENOMEM;
+		base = NULL;
+		cpa_fill_pool(&base);
+		if (!base)
+			return -ENOMEM;
+		spin_lock_irqsave(&pgd_lock, flags);
+	} else {
+		base = list_first_entry(&page_pool, struct page, lru);
+		list_del(&base->lru);
+		pool_pages--;
+
+		if (pool_pages < pool_low)
+			pool_low = pool_pages;
 	}
 
-	base = list_first_entry(&page_pool, struct page, lru);
-	list_del(&base->lru);
-	pool_pages--;
-
-	if (pool_pages < pool_low)
-		pool_low = pool_pages;
-
 	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
@@ -734,7 +751,8 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 		cpa_flush_all(cache);
 
 out:
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
+
 	return ret;
 }
 
@@ -897,7 +915,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
 }
 
 #ifdef CONFIG_HIBERNATION
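In short, the patch drops the old pool_refill bit-lock, guards cpa_fill_pool() against recursion with PF_MEMALLOC, and lets the refill path hand a freshly allocated page straight to the caller (the new struct page **ret argument) when split_large_page() finds the pool empty. Below is a minimal userspace sketch of that refill-or-hand-over pattern, for illustration only; the names (refill_pool, take_page, POOL_SIZE) and the plain int flag standing in for PF_MEMALLOC are assumptions of the sketch, not kernel code.

/*
 * Illustrative model of the pool pattern above: a refill routine that
 * can also hand one page directly to a caller that needs it right now,
 * protected against re-entry by a task flag (PF_MEMALLOC in the kernel,
 * a plain int here).
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4

static void *pool[POOL_SIZE];
static int pool_count;
static int refilling;			/* stands in for PF_MEMALLOC */

static void refill_pool(void **ret)
{
	/* Avoid recursion: a refill must not trigger another refill. */
	if (refilling)
		return;
	refilling = 1;

	while (pool_count < POOL_SIZE || (ret && !*ret)) {
		void *p = malloc(4096);	/* stands in for alloc_pages() */
		if (!p)
			break;
		/* If the call site needs a page right now, provide it: */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		pool[pool_count++] = p;
	}

	refilling = 0;
}

static void *take_page(void)
{
	void *p = NULL;

	if (pool_count > 0)
		return pool[--pool_count];

	/* Pool empty: ask the refill path for one page directly. */
	refill_pool(&p);
	return p;
}

int main(void)
{
	refill_pool(NULL);	/* prime the pool, as cpa_init() does */
	printf("pool primed with %d pages\n", pool_count);

	void *page = take_page();
	printf("got page %p, %d pages left\n", page, pool_count);

	free(page);
	while (pool_count > 0)
		free(pool[--pool_count]);
	return 0;
}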
