Commit 8861c76

---
yaml
---
r: 230726
b: refs/heads/master
c: 110d74a
h: refs/heads/master
v: v3
Michel Lespinasse authored and Linus Torvalds committed Jan 14, 2011
1 parent 115c6fd commit 8861c76
Showing 4 changed files with 29 additions and 61 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fed067da46ad3b9acedaf794a5f05d0bc153280b
+refs/heads/master: 110d74a921f4d272b47ef6104fcf937df808f4c8
1 change: 1 addition & 0 deletions trunk/include/linux/mm.h
@@ -1415,6 +1415,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET 0x04 /* do get_page on page */
 #define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
 #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
+#define FOLL_MLOCK 0x40 /* mark page as mlocked */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                         void *data);
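The FOLL_* constants above are independent bits in a single request-flags word: callers build the word with bitwise OR and follow_page() tests individual bits with bitwise AND, which is why adding a new capability only needs a new bit value. Below is a minimal userspace sketch of that pattern, reusing the values visible in this diff; FOLL_TOUCH's value (0x02) is assumed rather than shown in the hunk, and handle_request() is an illustrative stand-in, not a kernel function.

#include <stdio.h>

#define FOLL_TOUCH 0x02 /* assumed value; not shown in this hunk */
#define FOLL_GET   0x04 /* do get_page on page */
#define FOLL_MLOCK 0x40 /* mark page as mlocked */

/* Illustrative consumer: it reacts only to the bits it understands. */
static void handle_request(unsigned int flags)
{
        if (flags & FOLL_GET)
                printf("would take a page reference\n");
        if (flags & FOLL_MLOCK)
                printf("would mark the page as mlocked\n");
}

int main(void)
{
        /* Mirrors "gup_flags = FOLL_TOUCH | FOLL_MLOCK;" from the mlock.c hunk below. */
        unsigned int gup_flags = FOLL_TOUCH | FOLL_MLOCK;

        handle_request(gup_flags);
        return 0;
}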
22 changes: 22 additions & 0 deletions trunk/mm/memory.c
@@ -1310,6 +1310,28 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                  */
                 mark_page_accessed(page);
         }
+        if (flags & FOLL_MLOCK) {
+                /*
+                 * The preliminary mapping check is mainly to avoid the
+                 * pointless overhead of lock_page on the ZERO_PAGE
+                 * which might bounce very badly if there is contention.
+                 *
+                 * If the page is already locked, we don't need to
+                 * handle it now - vmscan will handle it later if and
+                 * when it attempts to reclaim the page.
+                 */
+                if (page->mapping && trylock_page(page)) {
+                        lru_add_drain(); /* push cached pages to LRU */
+                        /*
+                         * Because we lock page here and migration is
+                         * blocked by the pte's page reference, we need
+                         * only check for file-cache page truncation.
+                         */
+                        if (page->mapping)
+                                mlock_vma_page(page);
+                        unlock_page(page);
+                }
+        }
 unlock:
         pte_unmap_unlock(ptep, ptl);
 out:
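The new FOLL_MLOCK branch above is a "cheap check, trylock, recheck under the lock" shape: the unlocked page->mapping test skips pages that cannot need mlocking (notably the ZERO_PAGE, where taking the page lock could bounce badly under contention), trylock_page() leaves already-locked pages for vmscan to handle later, and page->mapping is tested again under the lock because the page may have been truncated in between. A rough userspace analogue of that shape, with a pthread mutex standing in for the page lock (all names here are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct record {
        pthread_mutex_t lock;  /* stands in for the page lock */
        bool attached;         /* stands in for page->mapping != NULL */
        bool pinned;           /* stands in for the mlocked state */
};

/* Opportunistically pin a record, mirroring the shape of the FOLL_MLOCK branch. */
static void try_pin(struct record *r)
{
        /* Cheap unlocked check: skip records that obviously don't qualify. */
        if (!r->attached)
                return;

        /* Trylock: if someone else holds the lock, leave the work for later. */
        if (pthread_mutex_trylock(&r->lock) != 0)
                return;

        /*
         * Recheck under the lock: the state may have changed since the
         * unlocked test (the kernel code rechecks page->mapping here to
         * catch file-cache truncation).
         */
        if (r->attached)
                r->pinned = true;

        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct record r = { .lock = PTHREAD_MUTEX_INITIALIZER,
                            .attached = true, .pinned = false };

        try_pin(&r);
        printf("pinned: %d\n", r.pinned);
        return 0;
}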
65 changes: 5 additions & 60 deletions trunk/mm/mlock.c
@@ -159,18 +159,17 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long addr = start;
-        struct page *pages[16]; /* 16 gives a reasonable batch */
         int nr_pages = (end - start) / PAGE_SIZE;
-        int ret = 0;
         int gup_flags;
+        int ret;
 
         VM_BUG_ON(start & ~PAGE_MASK);
         VM_BUG_ON(end & ~PAGE_MASK);
         VM_BUG_ON(start < vma->vm_start);
         VM_BUG_ON(end > vma->vm_end);
         VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-        gup_flags = FOLL_TOUCH | FOLL_GET;
+        gup_flags = FOLL_TOUCH | FOLL_MLOCK;
         /*
          * We want to touch writable mappings with a write fault in order
          * to break COW, except for shared mappings because these don't COW
@@ -185,63 +184,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                 nr_pages--;
         }
 
-        while (nr_pages > 0) {
-                int i;
-
-                cond_resched();
-
-                /*
-                 * get_user_pages makes pages present if we are
-                 * setting mlock. and this extra reference count will
-                 * disable migration of this page. However, page may
-                 * still be truncated out from under us.
-                 */
-                ret = __get_user_pages(current, mm, addr,
-                                min_t(int, nr_pages, ARRAY_SIZE(pages)),
-                                gup_flags, pages, NULL);
-                /*
-                 * This can happen for, e.g., VM_NONLINEAR regions before
-                 * a page has been allocated and mapped at a given offset,
-                 * or for addresses that map beyond end of a file.
-                 * We'll mlock the pages if/when they get faulted in.
-                 */
-                if (ret < 0)
-                        break;
-
-                lru_add_drain(); /* push cached pages to LRU */
-
-                for (i = 0; i < ret; i++) {
-                        struct page *page = pages[i];
-
-                        if (page->mapping) {
-                                /*
-                                 * That preliminary check is mainly to avoid
-                                 * the pointless overhead of lock_page on the
-                                 * ZERO_PAGE: which might bounce very badly if
-                                 * there is contention. However, we're still
-                                 * dirtying its cacheline with get/put_page:
-                                 * we'll add another __get_user_pages flag to
-                                 * avoid it if that case turns out to matter.
-                                 */
-                                lock_page(page);
-                                /*
-                                 * Because we lock page here and migration is
-                                 * blocked by the elevated reference, we need
-                                 * only check for file-cache page truncation.
-                                 */
-                                if (page->mapping)
-                                        mlock_vma_page(page);
-                                unlock_page(page);
-                        }
-                        put_page(page); /* ref from get_user_pages() */
-                }
-
-                addr += ret * PAGE_SIZE;
-                nr_pages -= ret;
-                ret = 0;
-        }
-
-        return ret; /* 0 or negative error code */
+        ret = __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+                               NULL, NULL);
+        return max(ret, 0); /* 0 or negative error code */
 }
 
 /*
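With FOLL_MLOCK available, the 16-page batching loop is gone: __mlock_vma_pages_range() now makes a single __get_user_pages() call for the whole range, passes NULL for the pages array so it never holds page references it would have to drop, and leaves the mlock marking to follow_page(). The sketch below is a hedged, plain-C illustration of that design change, contrasting a walker that hands referenced items back to the caller with one that accepts a request flag and marks items in place; every name and type here is invented for illustration and is not a kernel API.

#include <stdio.h>
#include <string.h>

#define MARK_VISITED 0x1 /* illustrative analogue of FOLL_MLOCK */

struct item {
        int refcount;
        int visited;
};

/* Old shape: hand every item back to the caller with an extra reference;
 * the caller must then mark and release each one in a second loop. */
static int walk_collect(struct item *items, int n, struct item **out)
{
        int i;

        for (i = 0; i < n; i++) {
                items[i].refcount++; /* reference the caller has to drop */
                out[i] = &items[i];
        }
        return n;
}

/* New shape: a request flag asks the walker to do the marking in place,
 * so no references leave the walk and the caller needs no cleanup loop. */
static int walk_flagged(struct item *items, int n, int flags)
{
        int i;

        for (i = 0; i < n; i++)
                if (flags & MARK_VISITED)
                        items[i].visited = 1;
        return n;
}

int main(void)
{
        struct item items[4];
        struct item *out[4];
        int i, got;

        /* Old shape: collect referenced items, then mark and release each one. */
        memset(items, 0, sizeof(items));
        got = walk_collect(items, 4, out);
        for (i = 0; i < got; i++) {
                out[i]->visited = 1;
                out[i]->refcount--; /* drop the reference the walker took */
        }

        /* New shape: one call, the walker marks items itself, nothing to release. */
        memset(items, 0, sizeof(items));
        walk_flagged(items, 4, MARK_VISITED);

        printf("visited: %d, refcount: %d\n", items[0].visited, items[0].refcount);
        return 0;
}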
