Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 94052
b: refs/heads/master
c: 0c0a4a5
h: refs/heads/master
v: v3
  • Loading branch information
Yasunori Goto authored and Linus Torvalds committed Apr 28, 2008
1 parent 24984ec commit 0404275
Show file tree
Hide file tree
Showing 5 changed files with 61 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 86f6dae1377523689bd8468fed2f2dd180fc0560
refs/heads/master: 0c0a4a517a31e05efb38304668198a873bfec6ca
3 changes: 1 addition & 2 deletions trunk/mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,7 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}

extern void __init __free_pages_bootmem(struct page *page,
unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
* function for dealing with page's order in buddy system.
Expand Down
11 changes: 11 additions & 0 deletions trunk/mm/memory_hotplug.c
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,16 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
return register_new_memory(__pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * CONFIG_SPARSEMEM_VMEMMAP variant: freeing a vmemmap-backed
	 * memmap is not implemented yet, so refuse section removal
	 * outright with -EBUSY.
	 * XXX: replace this stub once vmemmap teardown exists.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
unsigned long flags;
Expand All @@ -216,6 +226,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
pgdat_resize_unlock(pgdat, &flags);
return 0;
}
#endif

/*
* Reasonably generic function for adding memory. It is
Expand Down
2 changes: 1 addition & 1 deletion trunk/mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -546,7 +546,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
/*
* permit the bootmem allocator to evade page validation on high-order frees
*/
void __init __free_pages_bootmem(struct page *page, unsigned int order)
void __free_pages_bootmem(struct page *page, unsigned int order)
{
if (order == 0) {
__ClearPageReserved(page);
Expand Down
51 changes: 47 additions & 4 deletions trunk/mm/sparse.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
Expand Down Expand Up @@ -376,6 +377,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	/*
	 * No-op for CONFIG_SPARSEMEM_VMEMMAP: freeing a vmemmap-backed
	 * memmap is not implemented yet, so there is nothing to release
	 * here.
	 */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
Expand Down Expand Up @@ -413,28 +417,67 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
free_pages((unsigned long)memmap,
get_order(sizeof(struct page) * nr_pages));
}

/*
 * Hand back to bootmem the pages that held a removed section's memmap,
 * skipping any memmap page that lives inside the section being removed.
 */
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	struct page *end = page + nr_pages;

	for (; page < end; page++) {
		unsigned long map_nr, removed_nr;

		/* A section memmap page must never carry node-info magic. */
		BUG_ON(atomic_read(&page->_mapcount) == NODE_INFO);

		map_nr = pfn_to_section_nr(page_to_pfn(page));
		removed_nr = page->private;

		/*
		 * The section being removed is already logically offline,
		 * i.e. all of its pages are isolated from the page
		 * allocator.  If this memmap page sits inside that very
		 * section it must NOT be freed: the allocator could hand
		 * it out again even though the memory behind it is about
		 * to disappear physically.
		 */
		if (map_nr != removed_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Release the usemap (and, if present, the memmap) backing a section that
 * is being removed.
 *
 * Two allocation origins are distinguished by PageSlab() on the usemap's
 * page:
 *  - hot-plug-added sections got their usemap from kmalloc and their
 *    memmap from __kmalloc_section_memmap(); both are simply freed.
 *  - boot-time sections carved their usemap out of bootmem, packed with
 *    other usemaps on the pgdat's section; that memory is deliberately
 *    kept, while the bootmem-backed memmap pages go to free_map_bootmem().
 */
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		/* Whole pages occupied by this section's memmap. */
		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
Expand Down

0 comments on commit 0404275

Please sign in to comment.