Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 126237
b: refs/heads/master
c: 8cc3b39
h: refs/heads/master
i:
  126235: ac79112
v: v3
  • Loading branch information
Hugh Dickins authored and Linus Torvalds committed Jan 6, 2009
1 parent 3441753 commit 7cb5601
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 79f4b7bf393e67bbffec807cc68caaefc72b82ee
refs/heads/master: 8cc3b39221b0ecbd83a338948a8396df097fc656
52 changes: 24 additions & 28 deletions trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -231,9 +231,9 @@ static void bad_page(struct page *page)
printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
KERN_EMERG "Backtrace:\n");
dump_stack();
set_page_count(page, 0);
reset_page_mapcount(page);
page->mapping = NULL;

/* Leave bad fields for debug, except PageBuddy could make trouble */
__ClearPageBuddy(page);
add_taint(TAINT_BAD_PAGE);
}

Expand Down Expand Up @@ -290,25 +290,31 @@ void prep_compound_gigantic_page(struct page *page, unsigned long order)
}
#endif

/*
 * NOTE(review): this span is a unified-diff hunk whose +/- gutter was
 * lost in extraction, so removed (pre-image) and added (post-image)
 * lines appear adjacent. The change apparently converts
 * destroy_compound_page() from void to int so it can report how many
 * bad pages it found, letting the caller (__free_one_page) bail out
 * instead of freeing a corrupted compound page — TODO confirm against
 * the upstream commit (8cc3b39).
 */
/* old signature (removed): returned void */
static void destroy_compound_page(struct page *page, unsigned long order)
/* new signature (added): returns a count of detected bad pages */
static int destroy_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
/* added: accumulator for bad_page() hits, returned to the caller */
int bad = 0;

/* old condition (removed): checked compound order only */
if (unlikely(compound_order(page) != order))
/* new condition (added): folds the !PageHead check in as well */
if (unlikely(compound_order(page) != order) ||
unlikely(!PageHead(page))) {
bad_page(page);
bad++;
}

/* old (removed): separate PageHead check, now merged above */
if (unlikely(!PageHead(page)))
bad_page(page);
/* context: head flag is cleared unconditionally in both versions */
__ClearPageHead(page);

/* walk the tail pages; index 0 is the head handled above */
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;

/* old condition layout (removed, split across two lines) */
if (unlikely(!PageTail(p) |
(p->first_page != page)))
/* new condition (added): same test, one line, now counts failures */
if (unlikely(!PageTail(p) | (p->first_page != page))) {
bad_page(page);
bad++;
}
__ClearPageTail(p);
}

/* added: non-zero means the compound page was corrupt */
return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
Expand Down Expand Up @@ -428,7 +434,8 @@ static inline void __free_one_page(struct page *page,
int migratetype = get_pageblock_migratetype(page);

if (unlikely(PageCompound(page)))
destroy_compound_page(page, order);
if (unlikely(destroy_compound_page(page, order)))
return;

page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

Expand Down Expand Up @@ -465,15 +472,10 @@ static inline int free_pages_check(struct page *page)
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
(page_count(page) != 0) |
(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
bad_page(page);
/*
* For now, we report if PG_reserved was found set, but do not
* clear it, and do not free the page. But we shall soon need
* to do more, for when the ZERO_PAGE count wraps negative.
*/
if (PageReserved(page))
return 1;
}
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
Expand Down Expand Up @@ -521,11 +523,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
int i;
int reserved = 0;
int bad = 0;

for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
if (reserved)
bad += free_pages_check(page + i);
if (bad)
return;

if (!PageHighMem(page)) {
Expand Down Expand Up @@ -610,17 +612,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
(page_count(page) != 0) |
(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
bad_page(page);

/*
* For now, we report if PG_reserved was found set, but do not
* clear it, and do not allocate the page: as a safety net.
*/
if (PageReserved(page))
return 1;
}

page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
set_page_private(page, 0);
set_page_refcounted(page);

Expand Down

0 comments on commit 7cb5601

Please sign in to comment.