Commit 9dfea65

---
r: 176296
b: refs/heads/master
c: 80e1482
h: refs/heads/master
v: v3
Hugh Dickins authored and Linus Torvalds committed Dec 15, 2009
1 parent e6da40c commit 9dfea65
Showing 3 changed files with 26 additions and 49 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4035c07a895974d0ac06a56fe870ad293fc451a7
+refs/heads/master: 80e148226028257ec0a1909d99b2c40d0ffe17f2
67 changes: 21 additions & 46 deletions trunk/mm/ksm.c
@@ -831,7 +831,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
  * @page: the PageAnon page that we want to replace with kpage
- * @kpage: the PageKsm page that we want to map instead of page
+ * @kpage: the PageKsm page that we want to map instead of page,
+ *         or NULL the first time when we want to use page as kpage.
  *
  * This function returns 0 if the pages were merged, -EFAULT otherwise.
  */
@@ -864,15 +865,24 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
          * ptes are necessarily already write-protected. But in either
          * case, we need to lock and check page_count is not raised.
          */
-        if (write_protect_page(vma, page, &orig_pte) == 0 &&
-            pages_identical(page, kpage))
-                err = replace_page(vma, page, kpage, orig_pte);
+        if (write_protect_page(vma, page, &orig_pte) == 0) {
+                if (!kpage) {
+                        /*
+                         * While we hold page lock, upgrade page from
+                         * PageAnon+anon_vma to PageKsm+NULL stable_node:
+                         * stable_tree_insert() will update stable_node.
+                         */
+                        set_page_stable_node(page, NULL);
+                        mark_page_accessed(page);
+                        err = 0;
+                } else if (pages_identical(page, kpage))
+                        err = replace_page(vma, page, kpage, orig_pte);
+        }
 
-        if ((vma->vm_flags & VM_LOCKED) && !err) {
+        if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
                 munlock_vma_page(page);
                 if (!PageMlocked(kpage)) {
                         unlock_page(page);
                         lru_add_drain();
                         lock_page(kpage);
                         mlock_vma_page(kpage);
                         page = kpage;           /* for final unlock */
@@ -922,18 +932,15 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
  * This function returns the kpage if we successfully merged two identical
  * pages into one ksm page, NULL otherwise.
  *
- * Note that this function allocates a new kernel page: if one of the pages
+ * Note that this function upgrades page to ksm page: if one of the pages
  * is already a ksm page, try_to_merge_with_ksm_page should be used.
  */
 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
                                            struct page *page,
                                            struct rmap_item *tree_rmap_item,
                                            struct page *tree_page)
 {
-        struct mm_struct *mm = rmap_item->mm;
-        struct vm_area_struct *vma;
-        struct page *kpage;
-        int err = -EFAULT;
+        int err;
 
         /*
          * The number of nodes in the stable tree
@@ -943,49 +950,18 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
             ksm_max_kernel_pages <= ksm_pages_shared)
                 return NULL;
 
-        kpage = alloc_page(GFP_HIGHUSER);
-        if (!kpage)
-                return NULL;
-
-        down_read(&mm->mmap_sem);
-        if (ksm_test_exit(mm))
-                goto up;
-        vma = find_vma(mm, rmap_item->address);
-        if (!vma || vma->vm_start > rmap_item->address)
-                goto up;
-
-        copy_user_highpage(kpage, page, rmap_item->address, vma);
-
-        SetPageDirty(kpage);
-        __SetPageUptodate(kpage);
-        SetPageSwapBacked(kpage);
-        set_page_stable_node(kpage, NULL);      /* mark it PageKsm */
-        lru_cache_add_lru(kpage, LRU_ACTIVE_ANON);
-
-        err = try_to_merge_one_page(vma, page, kpage);
-        if (err)
-                goto up;
-
-        /* Must get reference to anon_vma while still holding mmap_sem */
-        hold_anon_vma(rmap_item, vma->anon_vma);
-up:
-        up_read(&mm->mmap_sem);
-
+        err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
         if (!err) {
                 err = try_to_merge_with_ksm_page(tree_rmap_item,
-                                                tree_page, kpage);
+                                                        tree_page, page);
                 /*
                  * If that fails, we have a ksm page with only one pte
                  * pointing to it: so break it.
                  */
                 if (err)
                         break_cow(rmap_item);
         }
-        if (err) {
-                put_page(kpage);
-                kpage = NULL;
-        }
-        return kpage;
+        return err ? NULL : page;
 }
 
 /*
@@ -1244,7 +1220,6 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
                         stable_tree_append(rmap_item, stable_node);
                 }
                 unlock_page(kpage);
-                put_page(kpage);
 
                 /*
                  * If we fail to insert the page into the stable tree,
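The heart of the ksm.c change: try_to_merge_two_pages() no longer allocates a fresh kernel page with alloc_page(GFP_HIGHUSER) and copies into it. It calls try_to_merge_with_ksm_page(rmap_item, page, NULL), and the NULL kpage tells try_to_merge_one_page() to upgrade page itself, in place, from PageAnon to PageKsm. The upgrade is cheap because KSM distinguishes its pages by the low bits of page->mapping. Below is a minimal, compilable sketch of the helpers this relies on; it assumes the 2.6.33-era encodings of PAGE_MAPPING_ANON and PAGE_MAPPING_KSM (the real definitions live in include/linux/mm.h and mm/ksm.c, not in this diff), with stand-in struct definitions so it builds in userspace.

#include <stdio.h>

/* Minimal stand-ins so the sketch compiles outside the kernel. */
struct stable_node;
struct page {
        void *mapping;  /* anon_vma or stable_node pointer, plus flag bits */
};

/* Assumed 2.6.33-era encoding: the low two bits of page->mapping say
 * what kind of mapping the page has. */
#define PAGE_MAPPING_ANON       1UL
#define PAGE_MAPPING_KSM        2UL
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

/* A page is PageKsm when both flag bits are set in the mapping word. */
static inline int PageKsm(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                        (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

/* The in-place upgrade is a single store: replace the anon_vma pointer
 * with a stable_node pointer (NULL at first) plus the KSM flag bits.
 * This is what set_page_stable_node(page, NULL) does in the hunk above;
 * stable_tree_insert() later fills in the real stable_node. */
static inline void set_page_stable_node(struct page *page,
                                        struct stable_node *stable_node)
{
        page->mapping = (void *)((unsigned long)stable_node |
                        (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM));
}

int main(void)
{
        struct page page = { .mapping = (void *)PAGE_MAPPING_ANON };

        printf("before upgrade: PageKsm=%d\n", PageKsm(&page));  /* 0 */
        set_page_stable_node(&page, NULL);
        printf("after upgrade:  PageKsm=%d\n", PageKsm(&page));  /* 1 */
        return 0;
}

A single store flips the page's identity, which is why the diff insists it happen while the page lock is held: readers that take the page lock, such as page_add_anon_rmap() in the rmap.c hunk below, then see either the old anon_vma or the new stable_node, never a torn mixture.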
6 changes: 4 additions & 2 deletions trunk/mm/rmap.c
@@ -204,7 +204,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
         unsigned long anon_mapping;
 
         rcu_read_lock();
-        anon_mapping = (unsigned long) page->mapping;
+        anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
         if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                 goto out;
         if (!page_mapped(page))
@@ -666,7 +666,9 @@ static void __page_check_anon_rmap(struct page *page,
  * @address: the user virtual address mapped
  *
  * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping,index checking after setting.
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
  */
 void page_add_anon_rmap(struct page *page,
                         struct vm_area_struct *vma, unsigned long address)
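The rmap.c side closes the window that the in-place upgrade opens: page->mapping can now change under a reader from an anon_vma pointer tagged PAGE_MAPPING_ANON to a stable_node pointer tagged ANON|KSM while page_lock_anon_vma() inspects it under rcu_read_lock(). Reading the field exactly once through ACCESS_ONCE pins the value, so the PAGE_MAPPING_FLAGS check and the later anon_vma use operate on the same snapshot rather than on two separate compiler-issued loads. A standalone sketch of the idiom, assuming the classic include/linux/compiler.h definition of ACCESS_ONCE; read_anon_mapping() is a hypothetical stand-in for the first half of page_lock_anon_vma():

#include <stddef.h>

/* Classic compiler.h definition of this era: the volatile cast forces
 * the compiler to emit exactly one load and reuse that value. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct page { void *mapping; };

#define PAGE_MAPPING_ANON       1UL
#define PAGE_MAPPING_KSM        2UL
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

/* Hypothetical reader modelled on page_lock_anon_vma(): without
 * ACCESS_ONCE the compiler may reload page->mapping after the flags
 * check, and a concurrent PageAnon->PageKsm upgrade could then hand
 * back a stable_node where an anon_vma was expected. */
void *read_anon_mapping(struct page *page)
{
        unsigned long anon_mapping;

        anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;            /* KSM or file page: not plain anon */
        return (void *)(anon_mapping - PAGE_MAPPING_ANON);
}

int main(void)
{
        struct page ksm_page  = { .mapping =
                (void *)(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) };
        struct page anon_page = { .mapping =
                (void *)(0x1000UL | PAGE_MAPPING_ANON) };

        /* The KSM page is rejected; the anon page yields its anon_vma. */
        return (read_anon_mapping(&ksm_page) == NULL &&
                read_anon_mapping(&anon_page) == (void *)0x1000UL) ? 0 : 1;
}

Note the asymmetry spelled out in the page_add_anon_rmap() comment above: an anon page may be upgraded to KSM under a racing reader, but a KSM page is never downgraded back, so a reader that sees the KSM bits can safely bail out.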
