Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 54202
b: refs/heads/master
c: d85f338
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed May 7, 2007
1 parent 3f8c026 commit 231c51b
Show file tree
Hide file tree
Showing 10 changed files with 73 additions and 38 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 30520864839dc796fd314812e7036e754880b47d
refs/heads/master: d85f33855c303acfa87fa457157cef755b6087df
2 changes: 1 addition & 1 deletion trunk/arch/ia64/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ lazy_mmu_prot_update (pte_t pte)
return; /* i-cache is already coherent with d-cache */

if (PageCompound(page)) {
order = (unsigned long) (page[1].lru.prev);
order = compound_order(page);
flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
}
else
Expand Down
2 changes: 1 addition & 1 deletion trunk/fs/hugetlbfs/inode.c
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ static int hugetlbfs_symlink(struct inode *dir,
*/
/*
 * Dirtying any page of a hugetlbfs mapping is redirected to the head
 * page of the backing compound page; tail pages carry no dirty state
 * of their own.  Always reports "not previously dirty" (0).
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	/*
	 * The diff residue declared `head` twice (old page_private()
	 * lookup plus its replacement) — only the compound_head() form
	 * belongs here.
	 */
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}
33 changes: 28 additions & 5 deletions trunk/include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -267,17 +267,28 @@ static inline int get_page_unless_zero(struct page *page)
return atomic_inc_not_zero(&page->_count);
}

/*
 * Map any page of a compound page onto its head page; a page that is
 * not a compound tail is returned unchanged.
 *
 * PG_tail is overloaded onto another flag, so PageTail() is only
 * meaningful when PageCompound() is also set — both must be tested.
 * This runs in several performance-critical paths (slab etc.); a
 * dedicated PG_tail flag would let the PageCompound() test go away.
 */
static inline struct page *compound_head(struct page *page)
{
	if (likely(!PageCompound(page)))
		return page;
	return PageTail(page) ? page->first_page : page;
}

/*
 * Return the reference count of the page.  For any page of a compound
 * page the count of the head page is reported, since the head holds
 * the refcount for the whole compound.
 */
static inline int page_count(struct page *page)
{
	/*
	 * The mashed diff kept the superseded open-coded head lookup
	 * and an unreachable duplicate return; only the
	 * compound_head() form is the intended code.
	 */
	return atomic_read(&compound_head(page)->_count);
}

/*
 * Take an additional reference on a page.  All refcounting on a
 * compound page is done against its head page.
 */
static inline void get_page(struct page *page)
{
	/*
	 * Diff residue had both the old page_private() lookup and this
	 * call; the compound_head() form subsumes the old one.
	 */
	page = compound_head(page);
	/* taking a reference on a page with count zero is a bug */
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	atomic_inc(&page->_count);
}
Expand Down Expand Up @@ -314,6 +325,18 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
return (compound_page_dtor *)page[1].lru.next;
}

/*
 * Return the allocation order of a compound page, 0 for anything that
 * is not the head of a compound page.  The order is read back from
 * page[1].lru.prev, where set_compound_order() stashed it.
 */
static inline int compound_order(struct page *page)
{
	if (PageCompound(page) && !PageTail(page))
		return (unsigned long)page[1].lru.prev;
	return 0;
}

/*
 * Record the allocation order of a compound page.  The order is
 * stashed in the otherwise-unused lru.prev field of the first tail
 * page; compound_order() reads it back from there.
 */
static inline void set_compound_order(struct page *page, unsigned long order)
{
page[1].lru.prev = (void *)order;
}

/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
Expand Down
14 changes: 14 additions & 0 deletions trunk/include/linux/page-flags.h
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,12 @@
/* PG_owner_priv_1 users should have descriptive aliases */
#define PG_checked PG_owner_priv_1 /* Used by some filesystems */

/*
 * Marks the tail pages of a compound page.  We currently do not
 * reclaim compound pages, so the reclaim-only flag can be reused
 * here.  Because of this aliasing, PageTail() is only meaningful
 * when PageCompound() is also true.
 */
#define PG_tail PG_reclaim

#if (BITS_PER_LONG > 32)
/*
* 64-bit-only flags build down from bit 31
Expand Down Expand Up @@ -241,6 +247,14 @@ static inline void SetPageUptodate(struct page *page)
#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)

/*
 * Note: PG_tail is an alias of another page flag (PG_reclaim).  The
 * result of PageTail() is therefore only valid if PageCompound(page)
 * is true.
 */
#define PageTail(page) test_bit(PG_tail, &(page)->flags)
#define __SetPageTail(page) __set_bit(PG_tail, &(page)->flags)
#define __ClearPageTail(page) __clear_bit(PG_tail, &(page)->flags)

#ifdef CONFIG_SWAP
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
Expand Down
2 changes: 1 addition & 1 deletion trunk/mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
*/
/*
 * (Re)initialise the refcount of a freshly allocated page to one.
 */
static inline void set_page_refcounted(struct page *page)
{
	/*
	 * Only a head (or non-compound) page may have its count set;
	 * tail pages are never refcounted directly.  The mashed diff
	 * carried both the old page_private()-based assertion and
	 * this replacement; only the PageTail() form belongs here.
	 */
	VM_BUG_ON(PageCompound(page) && PageTail(page));
	/* the count must be zero before it is reset */
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
Expand Down
29 changes: 20 additions & 9 deletions trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ static void bad_page(struct page *page)

/*
 * Destructor for compound pages: return the whole compound to the
 * page allocator at the order recorded by prep_compound_page().
 */
static void free_compound_page(struct page *page)
{
	/*
	 * The diff residue retained BOTH the old open-coded order
	 * lookup call and its compound_order() replacement — as
	 * displayed the page would be freed twice.  Exactly one call
	 * is correct.
	 */
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
Expand All @@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
int nr_pages = 1 << order;

set_compound_page_dtor(page, free_compound_page);
page[1].lru.prev = (void *)order;
for (i = 0; i < nr_pages; i++) {
set_compound_order(page, order);
__SetPageCompound(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;

__SetPageTail(p);
__SetPageCompound(p);
set_page_private(p, (unsigned long)page);
p->first_page = page;
}
}

Expand All @@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
int i;
int nr_pages = 1 << order;

if (unlikely((unsigned long)page[1].lru.prev != order))
if (unlikely(compound_order(page) != order))
bad_page(page);

for (i = 0; i < nr_pages; i++) {
if (unlikely(!PageCompound(page)))
bad_page(page);
__ClearPageCompound(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;

if (unlikely(!PageCompound(p) |
(page_private(p) != (unsigned long)page)))
if (unlikely(!PageCompound(p) | !PageTail(p) |
(p->first_page != page)))
bad_page(page);
__ClearPageTail(p);
__ClearPageCompound(p);
}
}
Expand Down Expand Up @@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page)
1 << PG_private |
1 << PG_locked |
1 << PG_active |
1 << PG_reclaim |
1 << PG_slab |
1 << PG_swapcache |
1 << PG_writeback |
1 << PG_reserved |
1 << PG_buddy ))))
bad_page(page);
/*
* PageReclaim == PageTail. It is only an error
* for PageReclaim to be set if PageCompound is clear.
*/
if (unlikely(!PageCompound(page) && PageReclaim(page)))
bad_page(page);
if (PageDirty(page))
__ClearPageDirty(page);
/*
Expand Down
6 changes: 2 additions & 4 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)

/*
 * Return the kmem_cache a slab page belongs to; the cache pointer is
 * stored in lru.next of the (head) page.  BUGs if the page is not a
 * slab page.
 */
static inline struct kmem_cache *page_get_cache(struct page *page)
{
	/*
	 * Diff residue kept both the old page_private() head lookup
	 * and this call; compound_head() alone is the intended code.
	 */
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}
Expand All @@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab)

/*
 * Return the struct slab a page belongs to; the slab pointer is
 * stored in lru.prev of the (head) page.  BUGs if the page is not a
 * slab page.
 */
static inline struct slab *page_get_slab(struct page *page)
{
	/*
	 * Diff residue kept both the old page_private() head lookup
	 * and this call; compound_head() alone is the intended code.
	 */
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}
Expand Down
19 changes: 4 additions & 15 deletions trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)

page = virt_to_page(x);

if (unlikely(PageCompound(page)))
page = page->first_page;

page = compound_head(page);

if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
set_tracking(s, x, TRACK_FREE);
Expand All @@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
struct page *page = virt_to_page(x);

if (unlikely(PageCompound(page)))
page = page->first_page;
struct page *page = compound_head(virt_to_page(x));

if (!PageSlab(page))
return NULL;
Expand Down Expand Up @@ -2081,10 +2076,7 @@ void kfree(const void *x)
if (!x)
return;

page = virt_to_page(x);

if (unlikely(PageCompound(page)))
page = page->first_page;
page = compound_head(virt_to_page(x));

s = page->slab;

Expand Down Expand Up @@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
return NULL;
}

page = virt_to_page(p);

if (unlikely(PageCompound(page)))
page = page->first_page;
page = compound_head(virt_to_page(p));

new_cache = get_slab(new_size, flags);

Expand Down
2 changes: 1 addition & 1 deletion trunk/mm/swap.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page)

static void put_compound_page(struct page *page)
{
page = (struct page *)page_private(page);
page = compound_head(page);
if (put_page_testzero(page)) {
compound_page_dtor *dtor;

Expand Down

0 comments on commit 231c51b

Please sign in to comment.