[PATCH] sparsemem extreme: hotplug preparation
This splits sparse_index_init() into two pieces: sparse_index_alloc(), which
allocates the second-level mem_section array, and sparse_index_init() itself,
which installs it. This is needed because we'll allocate the memory for the
second level in a different place from where we actually consume it, to keep
the allocation from happening underneath a lock.

Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Dave Hansen authored and Linus Torvalds committed Sep 5, 2005
1 parent 3e34726 commit 28ae55c
Showing 2 changed files with 42 additions and 12 deletions.
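
The mm/sparse.c hunk below follows the allocate-outside-the-lock idiom the commit message describes: the bootmem allocation is done first with no lock held, and index_init_lock is taken only for the short window in which a racing initializer is re-checked and the new pointer is published. As a minimal user-space sketch of that same allocate-then-publish pattern, not part of the patch (the roots[] array, the install_root() helper, and the pthread mutex are illustrative assumptions):

#include <pthread.h>
#include <stdlib.h>

#define NR_ROOTS 128

static void *roots[NR_ROOTS];
static pthread_mutex_t roots_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical analogue of sparse_index_init(): allocate before taking
 * the lock, then install the pointer under the lock, backing off if
 * another thread initialized the same root first. */
static int install_root(unsigned int root, size_t size)
{
        void *mem;
        int ret = 0;

        if (roots[root])
                return -1;              /* already set up (cf. -EEXIST) */

        mem = calloc(1, size);          /* allocation happens with no lock held */
        if (!mem)
                return -1;

        pthread_mutex_lock(&roots_lock);
        if (roots[root]) {              /* lost the race to another caller */
                ret = -1;
                goto out;
        }
        roots[root] = mem;
        mem = NULL;                     /* ownership transferred to roots[] */
out:
        pthread_mutex_unlock(&roots_lock);
        free(mem);                      /* non-NULL only on the lost race */
        return ret;
}

One deliberate difference: in the kernel patch an allocation that loses the race comes from bootmem and is simply left unused, whereas the sketch frees it.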
include/linux/mmzone.h: 1 change (1 addition, 0 deletions)
@@ -588,6 +588,7 @@ static inline int pfn_valid(unsigned long pfn)
 void sparse_init(void);
 #else
 #define sparse_init() do {} while (0)
+#define sparse_index_init(_sec, _nid) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
mm/sparse.c: 53 changes (41 additions, 12 deletions)
@@ -6,6 +6,7 @@
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <asm/dma.h>
 
 /*
@@ -22,27 +23,55 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 #endif
 EXPORT_SYMBOL(mem_section);
 
-static void sparse_alloc_root(unsigned long root, int nid)
-{
 #ifdef CONFIG_SPARSEMEM_EXTREME
-        mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
-#endif
+static struct mem_section *sparse_index_alloc(int nid)
+{
+        struct mem_section *section = NULL;
+        unsigned long array_size = SECTIONS_PER_ROOT *
+                                   sizeof(struct mem_section);
+
+        section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+
+        if (section)
+                memset(section, 0, array_size);
+
+        return section;
 }
 
-static void sparse_index_init(unsigned long section, int nid)
+static int sparse_index_init(unsigned long section_nr, int nid)
 {
-        unsigned long root = SECTION_NR_TO_ROOT(section);
+        static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
+        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
+        struct mem_section *section;
+        int ret = 0;
 
         if (mem_section[root])
-                return;
+                return -EEXIST;
 
-        sparse_alloc_root(root, nid);
+        section = sparse_index_alloc(nid);
+        /*
+         * This lock keeps two different sections from
+         * reallocating for the same index
+         */
+        spin_lock(&index_init_lock);
 
-        if (mem_section[root])
-                memset(mem_section[root], 0, PAGE_SIZE);
-        else
-                panic("memory_present: NO MEMORY\n");
+        if (mem_section[root]) {
+                ret = -EEXIST;
+                goto out;
+        }
+
+        mem_section[root] = section;
+out:
+        spin_unlock(&index_init_lock);
+        return ret;
 }
+#else /* !SPARSEMEM_EXTREME */
+static inline int sparse_index_init(unsigned long section_nr, int nid)
+{
+        return 0;
+}
+#endif
 
 /* Record a memory area against a node. */
 void memory_present(int nid, unsigned long start, unsigned long end)
 {
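For readers new to the SPARSEMEM_EXTREME layout this code manages: mem_section[] is an array of root pointers, each root covering SECTIONS_PER_ROOT consecutive struct mem_section entries (roughly one page's worth), and SECTION_NR_TO_ROOT() is a plain division by SECTIONS_PER_ROOT; sparse_index_init() guarantees the root array exists before its entries are touched. A standalone sketch of that index arithmetic, using illustrative sizes that are assumptions rather than values taken from this commit:

#include <stdio.h>

/* Illustrative stand-ins; the real values depend on PAGE_SIZE and
 * sizeof(struct mem_section) for the architecture being built. */
#define PAGE_SIZE               4096UL
#define MEM_SECTION_SIZE        16UL    /* assumed sizeof(struct mem_section) */
#define SECTIONS_PER_ROOT       (PAGE_SIZE / MEM_SECTION_SIZE)
#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)

int main(void)
{
        unsigned long section_nr = 1000;
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        unsigned long offset = section_nr % SECTIONS_PER_ROOT;

        /* sparse_index_init(section_nr, nid) ensures mem_section[root]
         * points at an allocated second-level array before anything
         * touches the entry at this offset. */
        printf("section %lu -> root %lu, offset %lu (%lu sections per root)\n",
               section_nr, root, offset, SECTIONS_PER_ROOT);
        return 0;
}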
