Commit

---
r: 22473
b: refs/heads/master
c: 8409751
h: refs/heads/master
i:
  22471: 2705e3f
v: v3
Nick Piggin authored and Linus Torvalds committed Mar 22, 2006
1 parent 8b87643 commit b303234
Showing 7 changed files with 12 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0f8053a509ceba4a077a50ea7b77039b5559b428
+refs/heads/master: 84097518d1ecd2330f9488e4c2d09953a3340e74
3 changes: 1 addition & 2 deletions trunk/fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	xpages = 1UL << order;
 	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	for (loop = 0; loop < npages; loop++)
-		set_page_count(pages + loop, 1);
+	split_page(pages, order);
 
 	/* trim off any pages we don't actually require */
 	for (loop = npages; loop < xpages; loop++)
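A note on the hunk above: the deleted loop and split_page() do the same bookkeeping, leaving every sub-page of the 2^order allocation with a reference count of 1 so that the trailing pages ramfs does not need can be freed one by one just below (split_page()'s body is visible in the mm/page_alloc.c hunk further down). A minimal userspace sketch of that idea, using a mock page type rather than the kernel's struct page:

#include <stdio.h>

/* Illustrative stand-in for struct page: only the refcount matters here. */
struct mock_page {
	int count;
};

/* Model of split_page(): give each sub-page of an order-N block its own
 * reference so the unneeded tail pages can later be released one by one. */
static void mock_split_page(struct mock_page *pages, unsigned int order)
{
	unsigned long i, n = 1UL << order;

	for (i = 0; i < n; i++)
		pages[i].count = 1;
}

int main(void)
{
	struct mock_page pages[8] = { { 0 } };	/* order-3 block: 8 sub-pages */
	unsigned long npages = 5;		/* pages actually required */
	unsigned long i;

	mock_split_page(pages, 3);

	/* "trim off any pages we don't actually require" */
	for (i = npages; i < 8; i++)
		pages[i].count = 0;		/* stands in for __free_page() */

	for (i = 0; i < 8; i++)
		printf("page %lu: refcount %d\n", i, pages[i].count);
	return 0;
}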
4 changes: 0 additions & 4 deletions trunk/include/linux/mm.h
@@ -327,11 +327,7 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
12 changes: 0 additions & 12 deletions trunk/mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
 	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
4 changes: 2 additions & 2 deletions trunk/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
 	 */
-	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL);
+	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
 	if (!base)
 		goto enomem;
 
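Both nommu allocation paths above now OR __GFP_COMP into the mask so the underlying high-order pages are allocated as a single compound page, which the page_alloc.c and slab.c changes below rely on for finding the head page. The mask manipulation itself is ordinary bit arithmetic; a small standalone sketch with made-up flag values (the real __GFP_* constants are defined in the kernel's gfp.h):

#include <stdio.h>

/* Illustrative values only; not the kernel's real flag bits. */
#define MOCK_GFP_HIGHMEM 0x0002u
#define MOCK_GFP_COMP    0x4000u
#define MOCK_GFP_KERNEL  0x00d0u

int main(void)
{
	unsigned int gfp_mask = MOCK_GFP_KERNEL | MOCK_GFP_HIGHMEM;

	/* mirrors (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM in __vmalloc() */
	unsigned int fixed = (gfp_mask | MOCK_GFP_COMP) & ~MOCK_GFP_HIGHMEM;

	printf("before %#06x, after %#06x\n", gfp_mask, fixed);
	/* __GFP_COMP set, __GFP_HIGHMEM cleared, all other bits unchanged */
	return 0;
}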
7 changes: 0 additions & 7 deletions trunk/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
 		set_page_count(page + i, 1);
 	}
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
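With the allocations marked compound, a high-order block is freed through a single reference on its head page, so __free_pages_ok no longer needs the nommu-only pass that dropped an extra reference on each tail page, and split_page() is now compiled on both MMU and nommu builds. A rough model of the two refcount schemes, under the same simplified assumptions as the sketches above:

#include <stdio.h>

struct mock_page {
	int count;
};

/* Old nommu scheme: every sub-page carried its own reference, so freeing
 * an order-N block had to drop the reference on each tail page as well
 * (the removed __put_page() loop). */
static void free_block_per_page(struct mock_page *pages, unsigned int order)
{
	unsigned long i, n = 1UL << order;

	for (i = 1; i < n; i++)
		pages[i].count--;
	pages[0].count--;
}

/* Compound scheme: only the head page is referenced, so the whole block
 * is gone once that single count drops to zero. */
static void free_block_compound(struct mock_page *pages)
{
	pages[0].count--;
}

int main(void)
{
	struct mock_page a[4] = { {1}, {1}, {1}, {1} };	/* per-page refs */
	struct mock_page b[4] = { {1}, {0}, {0}, {0} };	/* head ref only  */

	free_block_per_page(a, 2);
	free_block_compound(b);
	printf("per-page: head %d tail %d, compound: head %d tail %d\n",
	       a[0].count, a[3].count, b[0].count, b[3].count);
	return 0;
}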
9 changes: 8 additions & 1 deletion trunk/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
 	struct page *page;
 
 	/* Nasty!!!!!! I hope this is OK. */
-	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
+
+	i = 1;
+	if (likely(!PageCompound(page)))
+		i <<= cachep->gfporder;
 	do {
 		page_set_cache(page, cachep);
 		page_set_slab(page, slabp);
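The slab changes store the cache and slab descriptors only on the head page of a compound allocation: PageCompound() pages carry a pointer to the head in page_private(), so page_get_cache()/page_get_slab() hop to the head first, and set_slab_attr() only tags all 1 << gfporder pages when the slab's memory is not compound. A self-contained sketch of that head/tail indirection, with mock types in place of struct page:

#include <stdio.h>
#include <stdbool.h>

struct mock_cache { const char *name; };
struct mock_slab  { int id; };

/* Stand-in for struct page: in a compound block every sub-page's
 * "private" field points at the head page, which holds the descriptors. */
struct mock_page {
	bool compound;
	struct mock_page *private;	/* head page of the compound block */
	struct mock_cache *cache;	/* meaningful on the head page */
	struct mock_slab *slab;
};

static struct mock_page *head_of(struct mock_page *page)
{
	if (page->compound)		/* mirrors the PageCompound() check */
		page = page->private;
	return page;
}

static struct mock_cache *page_get_cache(struct mock_page *page)
{
	return head_of(page)->cache;
}

static struct mock_slab *page_get_slab(struct mock_page *page)
{
	return head_of(page)->slab;
}

int main(void)
{
	struct mock_cache cache = { "example-cache" };
	struct mock_slab slab = { 42 };
	struct mock_page pages[4] = { { 0 } };
	int i;

	/* Head page carries the descriptors; every sub-page (head included)
	 * points back at the head, which is how compound pages are set up. */
	pages[0].cache = &cache;
	pages[0].slab = &slab;
	for (i = 0; i < 4; i++) {
		pages[i].compound = true;
		pages[i].private = &pages[0];
	}

	/* A lookup through any sub-page resolves to the same descriptors. */
	printf("%s slab %d\n", page_get_cache(&pages[2])->name,
	       page_get_slab(&pages[3])->id);
	return 0;
}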
