Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 69587
b: refs/heads/master
c: d29eff7
h: refs/heads/master
i:
  69585: 51d477a
  69583: cad3eb7
v: v3
  • Loading branch information
Andy Whitcroft authored and Linus Torvalds committed Oct 16, 2007
1 parent 1722394 commit 827c829
Show file tree
Hide file tree
Showing 4 changed files with 77 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 46644c2477c58906e95281636d04e9cc42b39352
refs/heads/master: d29eff7bca60c9ee401d691d4562a4abca8de543
1 change: 1 addition & 0 deletions trunk/arch/powerpc/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -295,6 +295,7 @@ config ARCH_FLATMEM_ENABLE
# Sparsemem is available whenever we build for 64-bit PowerPC; selecting
# SPARSEMEM_VMEMMAP_ENABLE additionally offers the virtual-memmap layout.
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on PPC64
select SPARSEMEM_VMEMMAP_ENABLE

config ARCH_SPARSEMEM_DEFAULT
def_bool y
Expand Down
67 changes: 67 additions & 0 deletions trunk/arch/powerpc/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -183,3 +183,70 @@ void pgtable_cache_init(void)
zero_ctor);
}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Map an address inside the vmemmap area back to the pfn of the first
 * page of the sparsemem section containing it.  The supplied address may
 * not be aligned to a struct page boundary, and subtracting unaligned
 * struct page pointers is undefined behaviour, so the arithmetic is done
 * by hand on unsigned longs instead.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long byte_offset = page - (unsigned long)vmemmap;

	/* Convert the byte offset to a page index, masked to its section. */
	return (byte_offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Report whether this vmemmap page has already been initialised: it has
 * if any sparsemem section overlapping the page is itself initialised
 * (i.e. the pfn at that section's start is valid).
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long addr;
	unsigned long end = start + page_size;

	for (addr = start; addr < end;
	     addr += PAGES_PER_SECTION * sizeof(struct page)) {
		if (pfn_valid(vmemmap_section_start(addr)))
			return 1;
	}

	return 0;
}

/*
 * Populate the vmemmap backing for the struct page range
 * [start_page, start_page + nr_pages).  The range is walked in chunks
 * the size of the linear mapping's page size; any chunk not already
 * covered by an initialised section gets a freshly allocated block,
 * which is then bolted into the hash page table.
 *
 * Returns 0 on success or -ENOMEM if a backing block cannot be
 * allocated.  A failed hash-table insertion is fatal (BUG).
 */
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		/* Skip chunks already backed via an overlapping section. */
		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		/*
		 * __pa() yields an unsigned long, so it must be printed
		 * with %08lx; the original %p specifier mismatched the
		 * argument type (undefined behaviour per C99 7.19.6.1).
		 */
		printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
				    "physical %08lx.\n", start, p,
				    (unsigned long)__pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw,
					   mmu_linear_psize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
8 changes: 8 additions & 0 deletions trunk/include/asm-powerpc/pgtable-ppc64.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,14 @@
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID (0UL)

/*
 * Defines the address of the vmemmap area, in the top 16th of the
 * kernel region.
 */
#define VMEMMAP_BASE (ASM_CONST(CONFIG_KERNEL_START) + \
(0xfUL << (REGION_SHIFT - 4)))
#define vmemmap ((struct page *)VMEMMAP_BASE)

/*
* Common bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible. Additional
Expand Down

0 comments on commit 827c829

Please sign in to comment.