---
r: 376281
b: refs/heads/master
c: a9ff785
h: refs/heads/master
i:
  376279: e393dbc
v: v3
Cliff Wickman authored and Linus Torvalds committed May 24, 2013
1 parent cecefd9 commit 53783d2
Showing 2 changed files with 37 additions and 35 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 43c523bff7c3b47506d536c10637be8399dfd85f
+refs/heads/master: a9ff785e4437c83d2179161e012f5bdfbd6381f0
70 changes: 36 additions & 34 deletions trunk/mm/pagewalk.c
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 	return 0;
 }
 
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-	struct vm_area_struct *vma;
-
-	/* We don't need vma lookup at all. */
-	if (!walk->hugetlb_entry)
-		return NULL;
-
-	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-	vma = find_vma(walk->mm, addr);
-	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
-		return vma;
-
-	return NULL;
-}
-
 #else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-	return NULL;
-}
-
 static int walk_hugetlb_range(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end,
 			struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
 	if (!walk->mm)
 		return -EINVAL;
 
+	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
 	pgd = pgd_offset(walk->mm, addr);
 	do {
-		struct vm_area_struct *vma;
+		struct vm_area_struct *vma = NULL;
 
 		next = pgd_addr_end(addr, end);
 
 		/*
-		 * handle hugetlb vma individually because pagetable walk for
-		 * the hugetlb page is dependent on the architecture and
-		 * we can't handled it in the same manner as non-huge pages.
+		 * This function was not intended to be vma based.
+		 * But there are vma special cases to be handled:
+		 * - hugetlb vma's
+		 * - VM_PFNMAP vma's
 		 */
-		vma = hugetlb_vma(addr, walk);
+		vma = find_vma(walk->mm, addr);
 		if (vma) {
-			if (vma->vm_end < next)
+			/*
+			 * There are no page structures backing a VM_PFNMAP
+			 * range, so do not allow split_huge_page_pmd().
+			 */
+			if ((vma->vm_start <= addr) &&
+			    (vma->vm_flags & VM_PFNMAP)) {
 				next = vma->vm_end;
+				pgd = pgd_offset(walk->mm, next);
+				continue;
+			}
 			/*
-			 * Hugepage is very tightly coupled with vma, so
-			 * walk through hugetlb entries within a given vma.
+			 * Handle hugetlb vma individually because pagetable
+			 * walk for the hugetlb page is dependent on the
+			 * architecture and we can't handled it in the same
+			 * manner as non-huge pages.
 			 */
-			err = walk_hugetlb_range(vma, addr, next, walk);
-			if (err)
-				break;
-			pgd = pgd_offset(walk->mm, next);
-			continue;
+			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+			    is_vm_hugetlb_page(vma)) {
+				if (vma->vm_end < next)
+					next = vma->vm_end;
+				/*
+				 * Hugepage is very tightly coupled with vma,
+				 * so walk through hugetlb entries within a
+				 * given vma.
+				 */
+				err = walk_hugetlb_range(vma, addr, next, walk);
+				if (err)
+					break;
+				pgd = pgd_offset(walk->mm, next);
+				continue;
+			}
 		}
 
 		if (pgd_none_or_clear_bad(pgd)) {
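For context, a minimal sketch of a walk_page_range() caller written against the 3.10-era mm_walk API. It is not part of this commit, and the names resident_count, count_pte and count_resident are hypothetical; it simply counts present ptes in a range of a task's address space. With this patch applied, the walk skips any VM_PFNMAP vma outright (advancing next to vma->vm_end), since no page structures back such a range and split_huge_page_pmd() must not be applied to it, and it takes the walk_hugetlb_range() path only when the caller supplies a hugetlb_entry callback.

#include <linux/mm.h>
#include <linux/rwsem.h>

/* Hypothetical private state carried through the walk. */
struct resident_count {
	unsigned long present;
};

/* pte_entry callback: called for each pte in the walked range. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	struct resident_count *rc = walk->private;

	if (pte_present(*pte))
		rc->present++;
	return 0;	/* a non-zero return aborts the walk */
}

static unsigned long count_resident(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct resident_count rc = { 0 };
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &rc,
	};

	/*
	 * This patch moves the mmap_sem assertion into
	 * walk_page_range() itself, so the caller must hold it.
	 */
	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return rc.present;
}

Before this change, a walker like the one above could descend into a remap_pfn_range()-style VM_PFNMAP mapping, where there are no struct pages for the callbacks to operate on.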
