
Commit d770d1c

---
r: 23442
b: refs/heads/master
c: 9c50823
h: refs/heads/master
v: v3
Andrew Morton authored and Linus Torvalds committed Mar 24, 2006
1 parent 72df833 commit d770d1c
Showing 2 changed files with 62 additions and 33 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4741c9fd36b3bcadd37238321c469049da94a4b9
+refs/heads/master: 9c50823eebf7c256b92b4e0f02b5fb30e97788c2
93 changes: 61 additions & 32 deletions trunk/mm/msync.c
@@ -12,17 +12,20 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/hugetlb.h>
+#include <linux/writeback.h>
 #include <linux/file.h>
 #include <linux/syscalls.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
 	spinlock_t *ptl;
 	int progress = 0;
+	unsigned long ret = 0;
 
 again:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -43,58 +46,64 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (!page)
 			continue;
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
-				page_test_and_clear_dirty(page))
-			set_page_dirty(page);
+		    page_test_and_clear_dirty(page))
+			ret += set_page_dirty(page);
 		progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	if (addr != end)
 		goto again;
+	return ret;
 }
 
-static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-			unsigned long addr, unsigned long end)
+static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
+			pud_t *pud, unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		msync_pte_range(vma, pmd, addr, next);
+		ret += msync_pte_range(vma, pmd, addr, next);
 	} while (pmd++, addr = next, addr != end);
+	return ret;
 }
 
-static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-			unsigned long addr, unsigned long end)
+static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
+			pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		msync_pmd_range(vma, pud, addr, next);
+		ret += msync_pmd_range(vma, pud, addr, next);
 	} while (pud++, addr = next, addr != end);
+	return ret;
 }
 
-static void msync_page_range(struct vm_area_struct *vma,
+static unsigned long msync_page_range(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
 	 * to do anything more on an msync().
 	 */
 	if (vma->vm_flags & VM_HUGETLB)
-		return;
+		return 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(vma->vm_mm, addr);
@@ -103,8 +112,9 @@ static void msync_page_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		msync_pud_range(vma, pgd, addr, next);
+		ret += msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
+	return ret;
 }
 
 /*
@@ -118,8 +128,9 @@ static void msync_page_range(struct vm_area_struct *vma,
  * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-static int msync_interval(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long end, int flags,
+			unsigned long *nr_pages_dirtied)
 {
 	int ret = 0;
 	struct file *file = vma->vm_file;
@@ -128,7 +139,7 @@ static int msync_interval(struct vm_area_struct *vma,
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		msync_page_range(vma, addr, end);
+		*nr_pages_dirtied = msync_page_range(vma, addr, end);
 
 		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;
@@ -157,11 +168,8 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 	unsigned long end;
 	struct vm_area_struct *vma;
 	int unmapped_error, error = -EINVAL;
+	int done = 0;
 
-	if (flags & MS_SYNC)
-		current->flags |= PF_SYNCWRITE;
-
-	down_read(&current->mm->mmap_sem);
 	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
 		goto out;
 	if (start & ~PAGE_MASK)
@@ -180,13 +188,19 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 	 * If the interval [start,end) covers some unmapped address ranges,
 	 * just ignore them, but return -ENOMEM at the end.
 	 */
+	down_read(&current->mm->mmap_sem);
+	if (flags & MS_SYNC)
+		current->flags |= PF_SYNCWRITE;
 	vma = find_vma(current->mm, start);
 	unmapped_error = 0;
-	for (;;) {
+	do {
+		unsigned long nr_pages_dirtied = 0;
+		struct file *file;
+
 		/* Still start < end. */
 		error = -ENOMEM;
 		if (!vma)
-			goto out;
+			goto out_unlock;
 		/* Here start < vma->vm_end. */
 		if (start < vma->vm_start) {
 			unmapped_error = -ENOMEM;
@@ -195,22 +209,37 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 		/* Here vma->vm_start <= start < vma->vm_end. */
 		if (end <= vma->vm_end) {
 			if (start < end) {
-				error = msync_interval(vma, start, end, flags);
+				error = msync_interval(vma, start, end, flags,
+							&nr_pages_dirtied);
 				if (error)
-					goto out;
+					goto out_unlock;
 			}
 			error = unmapped_error;
-			goto out;
+			done = 1;
+		} else {
+			/* Here vma->vm_start <= start < vma->vm_end < end. */
+			error = msync_interval(vma, start, vma->vm_end, flags,
+						&nr_pages_dirtied);
+			if (error)
+				goto out_unlock;
 		}
-		/* Here vma->vm_start <= start < vma->vm_end < end. */
-		error = msync_interval(vma, start, vma->vm_end, flags);
-		if (error)
-			goto out;
+		file = vma->vm_file;
 		start = vma->vm_end;
-		vma = vma->vm_next;
-	}
-out:
-	up_read(&current->mm->mmap_sem);
+		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
+			get_file(file);
+			up_read(&current->mm->mmap_sem);
+			balance_dirty_pages_ratelimited_nr(file->f_mapping,
+							nr_pages_dirtied);
+			fput(file);
+			down_read(&current->mm->mmap_sem);
+			vma = find_vma(current->mm, start);
+		} else {
+			vma = vma->vm_next;
+		}
+	} while (!done);
+out_unlock:
 	current->flags &= ~PF_SYNCWRITE;
+	up_read(&current->mm->mmap_sem);
+out:
 	return error;
 }
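For context (not part of this commit): with the change above, msync() counts how many pages each call dirtied, and for MS_ASYNC it feeds that count into balance_dirty_pages_ratelimited_nr(), so heavy msync() callers get throttled much like heavy write(2) callers. A minimal userspace program that exercises this path — it dirties shared file-backed pages and calls msync(MS_ASYNC); the file name, size, and fill pattern are illustrative:

/*
 * Illustrative userspace test (not part of this commit): dirty a batch of
 * shared file-backed pages, then flush them with msync(MS_ASYNC).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 4096;			/* 64 pages; size is arbitrary */
	int fd = open("msync-demo.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return 1;
	}

	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(map, 0xab, len);			/* dirty every page of the mapping */

	/*
	 * MS_ASYNC does not wait for writeback.  With the patch above, the
	 * kernel counts the pages this call dirtied and may throttle the
	 * caller via balance_dirty_pages_ratelimited_nr(), much as a heavy
	 * write(2) caller is throttled.
	 */
	if (msync(map, len, MS_ASYNC) < 0)
		perror("msync");

	munmap(map, len);
	close(fd);
	return 0;
}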

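Also for context: the final hunk drops mmap_sem around the throttling step (get_file(), up_read(), balance_dirty_pages_ratelimited_nr(), fput(), down_read()) and then re-looks the VMA up with find_vma(), because the address space may have changed while the lock was released. Below is a hypothetical userspace sketch of that "release the shared lock for a slow step, then revalidate" pattern; the rwlock stands in for mmap_sem, and sync_one_area()/throttle_dirtier() are invented stand-ins for msync_page_range() and balance_dirty_pages_ratelimited_nr(), not kernel APIs.

/*
 * Hypothetical sketch (not kernel code) of the locking pattern in the final
 * sys_msync() hunk: drop the shared lock before a potentially slow throttling
 * step, then re-take it and revalidate, since the protected state may have
 * changed while the lock was released.  Build with: cc -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;	/* stands in for mmap_sem */

/* Invented stand-in for msync_page_range(): pretend it dirtied some pages. */
static unsigned long sync_one_area(int area)
{
	return (unsigned long)(area + 1) * 16;
}

/* Invented stand-in for balance_dirty_pages_ratelimited_nr(): may sleep. */
static void throttle_dirtier(unsigned long nr_pages_dirtied)
{
	usleep(nr_pages_dirtied * 10);
}

int main(void)
{
	int area, nareas = 4;

	pthread_rwlock_rdlock(&tree_lock);
	for (area = 0; area < nareas; area++) {
		unsigned long dirtied = sync_one_area(area);

		if (dirtied) {
			/* Never sleep in the throttle while holding the shared lock. */
			pthread_rwlock_unlock(&tree_lock);
			throttle_dirtier(dirtied);
			pthread_rwlock_rdlock(&tree_lock);
			/*
			 * State may have changed while unlocked; sys_msync()
			 * re-looks the vma up with find_vma() at this point.
			 */
		}
	}
	pthread_rwlock_unlock(&tree_lock);
	printf("synced %d areas\n", nareas);
	return 0;
}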