Commit 6030c60

---
r: 115852
b: refs/heads/master
c: 8edb08caf68184fb170f4f69c7445929e199eaea
h: refs/heads/master
v: v3
Lee Schermerhorn authored and Linus Torvalds committed Oct 20, 2008
1 parent 8e5f180 commit 6030c60
Showing 2 changed files with 44 additions and 4 deletions.
[refs] — 2 changes: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fa07e787733416c42938a310a8e717295934e33c
+refs/heads/master: 8edb08caf68184fb170f4f69c7445929e199eaea
trunk/mm/mlock.c — 46 changes: 43 additions & 3 deletions
@@ -318,6 +318,7 @@ static void __munlock_vma_pages_range(struct vm_area_struct *vma,
 int mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int nr_pages = (end - start) / PAGE_SIZE;
 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -329,8 +330,19 @@ int mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current)))
-		return __mlock_vma_pages_range(vma, start, end);
+			vma == get_gate_vma(current))) {
+		downgrade_write(&mm->mmap_sem);
+		nr_pages = __mlock_vma_pages_range(vma, start, end);
+
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		vma = find_vma(mm, start);
+		/* non-NULL vma must contain @start, but need to check @end */
+		if (!vma || end > vma->vm_end)
+			return -EAGAIN;
+		return nr_pages;
+	}
 
 	/*
 	 * User mapped kernel pages or huge pages:
@@ -424,13 +436,41 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	vma->vm_flags = newflags;
 
 	if (lock) {
+		/*
+		 * mmap_sem is currently held for write.  Downgrade the write
+		 * lock to a read lock so that other faults, mmap scans, ...
+		 * while we fault in all pages.
+		 */
+		downgrade_write(&mm->mmap_sem);
+
 		ret = __mlock_vma_pages_range(vma, start, end);
 		if (ret > 0) {
 			mm->locked_vm -= ret;
 			ret = 0;
 		}
-	} else
+		/*
+		 * Need to reacquire mmap sem in write mode, as our callers
+		 * expect this.  We have no support for atomically upgrading
+		 * a sem to write, so we need to check for ranges while sem
+		 * is unlocked.
+		 */
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		*prev = find_vma(mm, start);
+		/* non-NULL *prev must contain @start, but need to check @end */
+		if (!(*prev) || end > (*prev)->vm_end)
+			ret = -EAGAIN;
+	} else {
+		/*
+		 * TODO: for unlocking, pages will already be resident, so
+		 * we don't need to wait for allocations/reclaim/pagein, ...
+		 * However, unlocking a very large region can still take a
+		 * while.  Should we downgrade the semaphore for both lock
+		 * AND unlock ?
+		 */
 		__munlock_vma_pages_range(vma, start, end);
+	}
 
 out:
 	*prev = vma;
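Both hunks in trunk/mm/mlock.c follow the same locking discipline: enter with mmap_sem held for write, downgrade_write() it to a read lock so faults and mmap scans elsewhere in the mm can proceed while the potentially long page population runs, then drop the semaphore entirely, retake it for write, look the vma up again, and revalidate the range, returning -EAGAIN if it changed. The sketch below is a minimal userspace analogue of that drop-and-revalidate pattern, not kernel code: struct region, region_find(), and populate_range() are hypothetical stand-ins for the vma, find_vma(), and __mlock_vma_pages_range(), and a POSIX rwlock stands in for mmap_sem. POSIX rwlocks have no atomic counterpart to downgrade_write(), so the downgrade here is a plain unlock-then-rdlock; that only widens the race window the revalidation step already has to handle.

/* Userspace sketch of the downgrade-and-revalidate pattern above.
 * All names are hypothetical; build with: cc -pthread sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct region {				/* stand-in for vm_area_struct */
	unsigned long start, end;
};

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct region table[] = { { 0x1000, 0x9000 } };

/* Stand-in for find_vma(): first region whose end lies above addr. */
static struct region *region_find(unsigned long addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (addr < table[i].end)
			return &table[i];
	return NULL;
}

/* The slow work done under the read lock; in the patch this is
 * __mlock_vma_pages_range() faulting in every page of the range. */
static long populate_range(unsigned long start, unsigned long end)
{
	return (long)((end - start) / 4096);
}

/* Mirrors the control flow of mlock_vma_pages_range(): returns the
 * number of pages populated, or -EAGAIN if the region changed while
 * the lock was dropped. */
static long lock_region_range(unsigned long start, unsigned long end)
{
	long nr_pages;
	struct region *r;

	/* The caller of mlock_fixup() holds mmap_sem for write. */
	pthread_rwlock_wrlock(&map_lock);

	/* Kernel: downgrade_write(&mm->mmap_sem) is atomic; POSIX has
	 * no downgrade, so drop and retake as a reader instead. */
	pthread_rwlock_unlock(&map_lock);
	pthread_rwlock_rdlock(&map_lock);

	nr_pages = populate_range(start, end);

	/* Callers expect the lock held for write on return, and there
	 * is no atomic read-to-write upgrade either... */
	pthread_rwlock_unlock(&map_lock);
	/* ...so the region can change or disappear right here. */
	pthread_rwlock_wrlock(&map_lock);

	r = region_find(start);
	/* A non-NULL region must contain start; end must be rechecked. */
	if (!r || end > r->end)
		nr_pages = -EAGAIN;

	/* Unlike the kernel code, unlock before returning so the demo
	 * stays self-contained. */
	pthread_rwlock_unlock(&map_lock);
	return nr_pages;
}

int main(void)
{
	printf("populated: %ld pages\n", lock_region_range(0x1000, 0x5000));
	return 0;
}

The essential point the patch encodes is the final check: once a sleeping lock has been dropped, every pointer derived under it is stale, so find_vma() (region_find() above) must run again and the caller must be prepared for -EAGAIN.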
