---
r: 35571
b: refs/heads/master
c: b221385
h: refs/heads/master
i:
  35569: cf1fcea
  35567: 89c4e30
v: v3
Adrian Bunk authored and Linus Torvalds committed Sep 26, 2006
1 parent 1f6fef3 commit 4d7cacf
Showing 7 changed files with 26 additions and 32 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 204ec841fbea3e5138168edbc3a76d46747cc987
+refs/heads/master: b221385bc41d6789edde3d2fa0cb20d5045730eb
2 changes: 0 additions & 2 deletions trunk/include/linux/mm.h
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
2 changes: 0 additions & 2 deletions trunk/include/linux/slab.h
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 /* SLOB allocator routines */
 
 void kmem_cache_init(void);
-struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
 struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 	unsigned long,
 	void (*)(void *, struct kmem_cache *, unsigned long),
2 changes: 0 additions & 2 deletions trunk/include/linux/vmalloc.h
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 			pgprot_t prot);
-extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
-			pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
3 changes: 1 addition & 2 deletions trunk/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
39 changes: 19 additions & 20 deletions trunk/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -222,26 +241,6 @@ int lru_add_drain_all(void)
 }
 #endif
 
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
 /*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
8 changes: 5 additions & 3 deletions trunk/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
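Every file in this commit follows the same pattern: a function with no callers outside its own source file loses its extern declaration in the shared header, its definition gains static, and its EXPORT_SYMBOL() line is dropped; where the definition sits below its first in-file use (as in mm/vmalloc.c), a static forward declaration is added instead. A minimal standalone sketch of that pattern, using hypothetical names rather than the kernel symbols touched above:

/* Hypothetical example, not kernel code. Before a change like this,
 * helper() would be declared extern in a header and exported with
 * EXPORT_SYMBOL(); afterwards it is private to this file. */

static int helper(int x);		/* file-local forward declaration,
					   needed because the definition
					   comes after its only caller */

int consumer(int x)
{
	return helper(x) + 1;		/* the only caller lives in this file */
}

static int helper(int x)		/* definition is now static; the
					   EXPORT_SYMBOL() line is gone */
{
	return x * 2;
}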
