Commit df03eeb

---
r: 258937
b: refs/heads/master
c: 6c6d528
h: refs/heads/master
i:
  258935: d47e275
v: v3
KOSAKI Motohiro authored and Linus Torvalds committed Jul 26, 2011
1 parent c64c016 commit df03eeb
Showing 2 changed files with 38 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4b6ddbf7ed4ef2f40e0a27418146eedaa68953c6
+refs/heads/master: 6c6d5280431544e4036886ea74e3334a98bc5f96
43 changes: 37 additions & 6 deletions trunk/mm/pagewalk.c
@@ -126,7 +126,39 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
         return 0;
 }
-#endif
+
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+        struct vm_area_struct *vma;
+
+        /* We don't need vma lookup at all. */
+        if (!walk->hugetlb_entry)
+                return NULL;
+
+        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+        vma = find_vma(walk->mm, addr);
+        if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+                return vma;
+
+        return NULL;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+        return NULL;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+                              unsigned long addr, unsigned long end,
+                              struct mm_walk *walk)
+{
+        return 0;
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
@@ -165,18 +197,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
 
         pgd = pgd_offset(walk->mm, addr);
         do {
-                struct vm_area_struct *uninitialized_var(vma);
+                struct vm_area_struct *vma;
 
                 next = pgd_addr_end(addr, end);
 
-#ifdef CONFIG_HUGETLB_PAGE
                 /*
                  * handle hugetlb vma individually because pagetable walk for
                  * the hugetlb page is dependent on the architecture and
                  * we can't handled it in the same manner as non-huge pages.
                  */
-                vma = find_vma(walk->mm, addr);
-                if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) {
+                vma = hugetlb_vma(addr, walk);
+                if (vma) {
                         if (vma->vm_end < next)
                                 next = vma->vm_end;
                         /*
@@ -189,7 +220,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
                         pgd = pgd_offset(walk->mm, next);
                         continue;
                 }
-#endif
+
                 if (pgd_none_or_clear_bad(pgd)) {
                         if (walk->pte_hole)
                                 err = walk->pte_hole(addr, next, walk);
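
Not part of the commit: a minimal sketch of a pagewalk caller, to illustrate what the new hugetlb_vma() helper buys. The walker below leaves .hugetlb_entry NULL, so after this patch hugetlb_vma() returns NULL immediately and walk_page_range() never calls find_vma() (or hits the rwsem_is_locked() assertion) on its behalf; only walkers that register a hugetlb callback pay for the per-pgd vma lookup. The helper names count_pte and count_present_ptes are hypothetical, and the struct mm_walk fields (.mm, .pte_entry, .private) and mmap_sem locking assume the interface as it existed around this kernel release.

/*
 * Hypothetical example walker (not from this commit): counts present
 * PTEs in a range.  Because .hugetlb_entry is left NULL, hugetlb_vma()
 * bails out before find_vma(), so this walker skips the vma lookup.
 */
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <asm/pgtable.h>

static int count_pte(pte_t *pte, unsigned long addr,
                     unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .mm        = mm,
                .pte_entry = count_pte,
                .private   = &count,
                /* .hugetlb_entry intentionally left NULL */
        };

        down_read(&mm->mmap_sem);       /* walk_page_range() expects mmap_sem held */
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);

        return count;
}

Before this change, such a walker still performed find_vma() once per pgd step whenever CONFIG_HUGETLB_PAGE was enabled, whether or not it cared about hugetlb vmas; moving the lookup behind the walk->hugetlb_entry check confines that cost to the callers that actually need it.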
