[PATCH] Hugepage consolidation
A lot of the code in arch/*/mm/hugetlbpage.c is quite similar.  This patch
attempts to consolidate a lot of the code across the archs, putting the
combined version in mm/hugetlb.c.  There are a couple of uglyish hacks in
order to convert all the hugepage archs, but the result is a very large
reduction in the total amount of code.  It also means things like hugepage
lazy allocation could be implemented in one place, instead of six.
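
Concretely, each arch now keeps only a small set of page-table helpers
that the shared code in mm/hugetlb.c drives.  A minimal sketch of that
per-arch interface, inferred from the i386 diff below, where
huge_pte_alloc() and huge_pte_offset() lose their "static" qualifier
precisely so the generic code can call them; the declaration site and
the last two signatures are assumptions, not lifted from the patch:

	/* Hooks each hugepage arch provides to mm/hugetlb.c. */
	pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
	pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
	/* Installs a caller-built entry, like set_pte(); argument list
	 * assumed -- see the note on set_huge_pte() below. */
	void set_huge_pte(pte_t *ptep, pte_t entry);
	/* Clears and returns a huge pte; signature assumed -- see the
	 * SH4 note below. */
	pte_t huge_ptep_get_and_clear(pte_t *ptep);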

Tested, at least a little, on ppc64, i386 and x86_64.

Notes:
	- this patch changes the meaning of set_huge_pte() to be more
	  analogous to set_pte() (sketched below)
	- does SH4 need a special huge_ptep_get_and_clear()??
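
To make the first note concrete: the old i386 set_huge_pte() (removed
in the diff below) constructed the pte itself from a page and a
write_access flag; after this patch the caller constructs the entry and
the helper merely installs it, the way set_pte() does.  A hedged
before/after sketch -- the post-patch call is illustrative, not the
exact new signature:

	pte_t entry;

	/* before: the helper built the entry internally */
	set_huge_pte(mm, vma, page, page_table, vma->vm_flags & VM_WRITE);

	/* after: the caller builds the entry, as with set_pte() */
	entry = pte_mkyoung(mk_pte(page, vma->vm_page_prot));
	mk_pte_huge(entry);
	set_huge_pte(page_table, entry);	/* argument list assumed */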

Acked-by: William Lee Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
David Gibson authored and Linus Torvalds committed Jun 22, 2005
1 parent 1e7e5a9 commit 63551ae
Showing 19 changed files with 300 additions and 850 deletions.
170 changes: 8 additions & 162 deletions arch/i386/mm/hugetlbpage.c
@@ -18,7 +18,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -30,7 +30,7 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -42,21 +42,6 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
-{
-	pte_t entry;
-
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-	if (write_access) {
-		entry =
-		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-	} else
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	entry = pte_mkyoung(entry);
-	mk_pte_huge(entry);
-	set_pte(page_table, entry);
-}
-
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -69,77 +54,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto nomem;
-		src_pte = huge_pte_offset(src, addr);
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		set_pte(dst_pte, entry);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		addr += HPAGE_SIZE;
-	}
-	return 0;
-
-nomem:
-	return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		    struct page **pages, struct vm_area_struct **vmas,
-		    unsigned long *position, int *length, int i)
-{
-	unsigned long vpfn, vaddr = *position;
-	int remainder = *length;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-
-	vpfn = vaddr/PAGE_SIZE;
-	while (vaddr < vma->vm_end && remainder) {
-
-		if (pages) {
-			pte_t *pte;
-			struct page *page;
-
-			pte = huge_pte_offset(mm, vaddr);
-
-			/* hugetlb should be locked, and hence, prefaulted */
-			WARN_ON(!pte || pte_none(*pte));
-
-			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
-			WARN_ON(!PageCompound(page));
-
-			get_page(page);
-			pages[i] = page;
-		}
-
-		if (vmas)
-			vmas[i] = vma;
-
-		vaddr += PAGE_SIZE;
-		++vpfn;
-		--remainder;
-		++i;
-	}
-
-	*length = remainder;
-	*position = vaddr;
-
-	return i;
-}
-
 #if 0	/* This is just for testing */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -204,83 +118,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 }
 #endif
 
-void unmap_hugepage_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+void hugetlb_clean_stale_pgtable(pte_t *pte)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
-	pte_t pte, *ptep;
+	pmd_t *pmd = (pmd_t *) pte;
 	struct page *page;
 
-	BUG_ON(start & (HPAGE_SIZE - 1));
-	BUG_ON(end & (HPAGE_SIZE - 1));
-
-	for (address = start; address < end; address += HPAGE_SIZE) {
-		ptep = huge_pte_offset(mm, address);
-		if (!ptep)
-			continue;
-		pte = ptep_get_and_clear(mm, address, ptep);
-		if (pte_none(pte))
-			continue;
-		page = pte_page(pte);
-		put_page(page);
-	}
-	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-	flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
-	BUG_ON(vma->vm_start & ~HPAGE_MASK);
-	BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		if (!pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				free_huge_page(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
+	page = pmd_page(*pmd);
+	pmd_clear(pmd);
+	dec_page_state(nr_page_table_pages);
+	page_cache_release(page);
 }
 
 /* x86_64 also uses this file */
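
The one piece of new i386 code above is hugetlb_clean_stale_pgtable(),
which frees a leftover normal page table whose pmd slot is about to be
taken over by a hugepage mapping.  A hedged sketch of how the
consolidated code might invoke such an arch hook -- the call site is an
assumption (plausibly one of the "uglyish hacks" the log mentions) and
is not shown in this file's diff:

	/* in the shared prefault path, per hugepage address (assumed) */
	pte_t *pte = huge_pte_alloc(mm, addr);
	if (pte && !pte_none(*pte))
		/* a stale normal page table occupies this slot; let the
		 * arch tear it down before a huge pte is installed */
		hugetlb_clean_stale_pgtable(pte);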