Commit

---
r: 164466
b: refs/heads/master
c: 408e82b
h: refs/heads/master
v: v3
Hugh Dickins authored and Linus Torvalds committed Sep 22, 2009
1 parent 3968728 commit 6cceed8
Showing 2 changed files with 41 additions and 60 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 5d3bc2709114b416cab588c577e02c2470e40a6c
refs/heads/master: 408e82b78bcc9f1b47c76e833c3df97f675947de
99 changes: 40 additions & 59 deletions trunk/mm/mlock.c
@@ -139,49 +139,36 @@ static void munlock_vma_page(struct page *page)
}

/**
* __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
* __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
* @mlock: 0 indicate munlock, otherwise mlock.
*
* If @mlock == 0, unlock an mlocked range;
* else mlock the range of pages. This takes care of making the pages present ,
* too.
* This takes care of making the pages present too.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held for at least read.
*/
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
int mlock)
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
int nr_pages = (end - start) / PAGE_SIZE;
int ret = 0;
int gup_flags = 0;
int gup_flags;

VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON(start < vma->vm_start);
VM_BUG_ON(end > vma->vm_end);
VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
(atomic_read(&mm->mm_users) != 0));

/*
* mlock: don't page populate if vma has PROT_NONE permission.
* munlock: always do munlock although the vma has PROT_NONE
* permission, or SIGKILL is pending.
*/
if (!mlock)
gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
GUP_FLAGS_IGNORE_SIGKILL;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

gup_flags = 0;
if (vma->vm_flags & VM_WRITE)
gup_flags |= GUP_FLAGS_WRITE;
gup_flags = GUP_FLAGS_WRITE;

while (nr_pages > 0) {
int i;
@@ -201,19 +188,10 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
* This can happen for, e.g., VM_NONLINEAR regions before
* a page has been allocated and mapped at a given offset,
* or for addresses that map beyond end of a file.
* We'll mlock the the pages if/when they get faulted in.
* We'll mlock the pages if/when they get faulted in.
*/
if (ret < 0)
break;
if (ret == 0) {
/*
* We know the vma is there, so the only time
* we cannot get a single page should be an
* error (ret < 0) case.
*/
WARN_ON(1);
break;
}

lru_add_drain(); /* push cached pages to LRU */

@@ -224,28 +202,22 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
/*
* Because we lock page here and migration is blocked
* by the elevated reference, we need only check for
* page truncation (file-cache only).
* file-cache page truncation. This page->mapping
* check also neatly skips over the ZERO_PAGE(),
* though if that's common we'd prefer not to lock it.
*/
if (page->mapping) {
if (mlock)
mlock_vma_page(page);
else
munlock_vma_page(page);
}
if (page->mapping)
mlock_vma_page(page);
unlock_page(page);
put_page(page); /* ref from get_user_pages() */

/*
* here we assume that get_user_pages() has given us
* a list of virtually contiguous pages.
*/
addr += PAGE_SIZE; /* for next get_user_pages() */
nr_pages--;
put_page(page); /* ref from get_user_pages() */
}

addr += ret * PAGE_SIZE;
nr_pages -= ret;
ret = 0;
}

return ret; /* count entire vma as locked_vm */
return ret; /* 0 or negative error code */
}

/*
@@ -289,7 +261,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {

__mlock_vma_pages_range(vma, start, end, 1);
__mlock_vma_pages_range(vma, start, end);

/* Hide errors from mmap() and other callers */
return 0;
@@ -310,7 +282,6 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
return nr_pages; /* error or pages NOT mlocked */
}


/*
* munlock_vma_pages_range() - munlock all pages in the vma range.'
* @vma - vma containing range to be munlock()ed.
@@ -330,10 +301,24 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
* free them. This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end)
{
unsigned long addr;

lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
__mlock_vma_pages_range(vma, start, end, 0);

for (addr = start; addr < end; addr += PAGE_SIZE) {
struct page *page = follow_page(vma, addr, FOLL_GET);
if (page) {
lock_page(page);
if (page->mapping)
munlock_vma_page(page);
unlock_page(page);
put_page(page);
}
cond_resched();
}
}

/*
@@ -400,18 +385,14 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, __mlock_vma_pages_range will bring it back.
*/
vma->vm_flags = newflags;

if (lock) {
ret = __mlock_vma_pages_range(vma, start, end, 1);

if (ret > 0) {
mm->locked_vm -= ret;
ret = 0;
} else
ret = __mlock_posix_error_return(ret); /* translate if needed */
vma->vm_flags = newflags;
ret = __mlock_vma_pages_range(vma, start, end);
if (ret < 0)
ret = __mlock_posix_error_return(ret);
} else {
__mlock_vma_pages_range(vma, start, end, 0);
munlock_vma_pages_range(vma, start, end);
}

out:
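For context, here is a minimal userspace sketch (an illustration, not part of the change) that exercises the mlock()/munlock() system calls whose kernel paths this commit reworks; the mapping size and fill pattern are arbitrary:

/*
 * Minimal userspace sketch: lock and unlock an anonymous mapping.
 * mlock(2) reaches __mlock_vma_pages_range() via mlock_fixup();
 * munlock(2), after this commit, walks the range with follow_page()
 * in munlock_vma_pages_range().
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Mark the VMA VM_LOCKED and fault the pages in. */
	if (mlock(buf, len) != 0)
		perror("mlock");

	memset(buf, 0xaa, len);		/* pages are resident and locked */

	/* Clear VM_LOCKED and let the pages become reclaimable again. */
	if (munlock(buf, len) != 0)
		perror("munlock");

	munmap(buf, len);
	return 0;
}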
