Skip to content

Commit

Permalink
---
Browse files — browse the repository at this point in the history
yaml
---
r: 164473
b: refs/heads/master
c: 58fa879
h: refs/heads/master
i:
  164471: f1ae49a
v: v3
  • Loading branch information
Hugh Dickins authored and Linus Torvalds committed Sep 22, 2009
1 parent 4cd6fff commit f7297ce
Show file tree
Hide file tree
Showing 6 changed files with 32 additions and 41 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: a13ea5b759645a0779edc6dbfec9abfd83220844
refs/heads/master: 58fa879e1e640a1856f736b418984ebeccee1c95
1 change: 1 addition & 0 deletions trunk/include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -1232,6 +1232,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_TOUCH 0x02 /* mark page accessed */
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
Expand Down
6 changes: 1 addition & 5 deletions trunk/mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -250,12 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE 0x01
#define GUP_FLAGS_FORCE 0x02
#define GUP_FLAGS_DUMP 0x04

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int flags,
unsigned long start, int len, unsigned int foll_flags,
struct page **pages, struct vm_area_struct **vmas);

#define ZONE_RECLAIM_NOSCAN -2
Expand Down
44 changes: 19 additions & 25 deletions trunk/mm/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -1209,27 +1209,29 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int flags,
unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
int write = !!(flags & GUP_FLAGS_WRITE);
int force = !!(flags & GUP_FLAGS_FORCE);
unsigned long vm_flags;

if (nr_pages <= 0)
return 0;

VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

/*
* Require read or write permissions.
* If 'force' is set, we only require the "MAY" flags.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
vm_flags = (gup_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;

do {
struct vm_area_struct *vma;
unsigned int foll_flags;

vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(tsk, start)) {
Expand All @@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pte_t *pte;

/* user gate pages are read-only */
if (write)
if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
Expand Down Expand Up @@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;

foll_flags = FOLL_TOUCH;
if (pages)
foll_flags |= FOLL_GET;
if (flags & GUP_FLAGS_DUMP)
foll_flags |= FOLL_DUMP;
if (write)
foll_flags |= FOLL_WRITE;

if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i, foll_flags);
&start, &nr_pages, i, gup_flags);
continue;
}

do {
struct page *page;
unsigned int foll_flags = gup_flags;

/*
* If we have a pending SIGKILL, don't keep faulting
Expand All @@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;

if (write)
foll_flags |= FOLL_WRITE;

cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
Expand Down Expand Up @@ -1415,12 +1407,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
int flags = FOLL_TOUCH;

if (pages)
flags |= FOLL_GET;
if (write)
flags |= GUP_FLAGS_WRITE;
flags |= FOLL_WRITE;
if (force)
flags |= GUP_FLAGS_FORCE;
flags |= FOLL_FORCE;

return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
Expand All @@ -1447,7 +1441,7 @@ struct page *get_dump_page(unsigned long addr)
struct page *page;

if (__get_user_pages(current, current->mm, addr, 1,
GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
return NULL;
if (page == ZERO_PAGE(0)) {
page_cache_release(page);
Expand Down
4 changes: 2 additions & 2 deletions trunk/mm/mlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
VM_BUG_ON(end > vma->vm_end);
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

gup_flags = 0;
gup_flags = FOLL_TOUCH | FOLL_GET;
if (vma->vm_flags & VM_WRITE)
gup_flags = GUP_FLAGS_WRITE;
gup_flags |= FOLL_WRITE;

while (nr_pages > 0) {
int i;
Expand Down
16 changes: 8 additions & 8 deletions trunk/mm/nommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -168,20 +168,20 @@ unsigned int kobjsize(const void *objp)
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int flags,
unsigned long start, int nr_pages, int foll_flags,
struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
int i;
int write = !!(flags & GUP_FLAGS_WRITE);
int force = !!(flags & GUP_FLAGS_FORCE);

/* calculate required read or write permissions.
* - if 'force' is set, we only require the "MAY" flags.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
vm_flags = (foll_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (foll_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
Expand Down Expand Up @@ -223,9 +223,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int flags = 0;

if (write)
flags |= GUP_FLAGS_WRITE;
flags |= FOLL_WRITE;
if (force)
flags |= GUP_FLAGS_FORCE;
flags |= FOLL_FORCE;

return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
Expand Down

0 comments on commit f7297ce

Please sign in to comment.