Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 189576
b: refs/heads/master
c: 116354d
h: refs/heads/master
v: v3
  • Loading branch information
Naoya Horiguchi authored and Linus Torvalds committed Apr 7, 2010
1 parent db39ae4 commit bd45d9a
Show file tree
Hide file tree
Showing 4 changed files with 47 additions and 33 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 57119c34e53bbb8d244c3ff1335ef5145768538f
refs/heads/master: 116354d177ba2da37e91cf884e3d11e67f825efd
27 changes: 7 additions & 20 deletions trunk/fs/proc/task_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -662,31 +662,18 @@ static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
return pme;
}

static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
unsigned long end, struct mm_walk *walk)
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
struct hstate *hs = NULL;
int err = 0;
u64 pfn;

vma = find_vma(walk->mm, addr);
if (vma)
hs = hstate_vma(vma);
for (; addr != end; addr += PAGE_SIZE) {
u64 pfn = PM_NOT_PRESENT;

if (vma && (addr >= vma->vm_end)) {
vma = find_vma(walk->mm, addr);
if (vma)
hs = hstate_vma(vma);
}

if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
/* calculate pfn of the "raw" page in the hugepage. */
int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
pfn = huge_pte_to_pagemap_entry(*pte, offset);
}
int offset = (addr & ~hmask) >> PAGE_SHIFT;
pfn = huge_pte_to_pagemap_entry(*pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
Expand Down
4 changes: 2 additions & 2 deletions trunk/include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -783,8 +783,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
struct mm_walk *);
int (*hugetlb_entry)(pte_t *, unsigned long,
unsigned long, unsigned long, struct mm_walk *);
struct mm_struct *mm;
void *private;
};
Expand Down
47 changes: 37 additions & 10 deletions trunk/mm/pagewalk.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Return the end of the hugepage containing @addr, clamped to @end.
 * Rounds @addr down to its hugepage boundary, steps one hugepage
 * forward, and never goes past the walk's upper limit.
 */
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long next = (addr & huge_page_mask(h)) + huge_page_size(h);

	if (next < end)
		return next;
	return end;
}

/*
 * Walk the hugetlb entries of @vma between @addr and @end, invoking
 * walk->hugetlb_entry (when set) once per hugepage-sized step.
 *
 * Returns 0 on success, or the first non-zero value reported by the
 * hugetlb_entry callback.
 *
 * NOTE: the loop body runs at least once (do-while), matching the
 * caller's guarantee that addr < end on entry.
 */
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long mask = huge_page_mask(h);
	unsigned long cur = addr;

	do {
		unsigned long next = hugetlb_entry_end(h, cur, end);
		pte_t *ptep = huge_pte_offset(walk->mm, cur & mask);
		int ret = 0;

		/* Only report entries that actually exist in the page table. */
		if (ptep && walk->hugetlb_entry)
			ret = walk->hugetlb_entry(ptep, mask, cur, next, walk);
		if (ret)
			return ret;
		cur = next;
	} while (cur != end);

	return 0;
}
#endif

/**
* walk_page_range - walk a memory map's page tables with a callback
* @mm: memory map to walk
Expand Down Expand Up @@ -128,20 +159,16 @@ int walk_page_range(unsigned long addr, unsigned long end,
vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
if (vma && is_vm_hugetlb_page(vma)) {
pte_t *pte;
struct hstate *hs;

if (vma->vm_end < next)
next = vma->vm_end;
hs = hstate_vma(vma);
pte = huge_pte_offset(walk->mm,
addr & huge_page_mask(hs));
if (pte && !huge_pte_none(huge_ptep_get(pte))
&& walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, addr,
next, walk);
/*
* Hugepage is very tightly coupled with vma, so
* walk through hugetlb entries within a given vma.
*/
err = walk_hugetlb_range(vma, addr, next, walk);
if (err)
break;
pgd = pgd_offset(walk->mm, next);
continue;
}
#endif
Expand Down

0 comments on commit bd45d9a

Please sign in to comment.