
Commit a27c010

---
r: 210457
b: refs/heads/master
c: 4969c11
h: refs/heads/master
i:
  210455: 0ffaf68
v: v3
Andrea Arcangeli authored and Linus Torvalds committed Sep 10, 2010
1 parent 9d2e4ad commit a27c010
Showing 4 changed files with 44 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7c5367f205f7d53659fb19b9fdf65b7bc1a592c6
+refs/heads/master: 4969c1192d15afa3389e7ae3302096ff684ba655
20 changes: 9 additions & 11 deletions trunk/include/linux/ksm.h
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                        struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                        struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                         struct vm_area_struct *vma, unsigned long address)
 {
         struct anon_vma *anon_vma = page_anon_vma(page);
 
-        if (!anon_vma ||
-            (anon_vma->root == vma->anon_vma->root &&
-             page->index == linear_page_index(vma, address)))
-                return page;
-
-        return ksm_does_need_to_copy(page, vma, address);
+        return anon_vma &&
+                (anon_vma->root != vma->anon_vma->root ||
+                 page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
         return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                         struct vm_area_struct *vma, unsigned long address)
 {
-        return page;
+        return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
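
A note on the shape of this change: ksm_might_need_to_copy() turns from a function returning a (possibly substituted) struct page * into a cheap inline predicate, so callers test first and only pay for the out-of-line ksm_does_need_to_copy() when the test fires. Below is a minimal userspace sketch of the new predicate's logic, compilable in isolation; the types and names (struct page_model, might_need_to_copy) are stand-ins invented for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's struct anon_vma / struct page. */
struct anon_vma { const void *root; };
struct page_model {
        const struct anon_vma *anon_vma;  /* NULL once the mapping is gone */
        unsigned long index;
};

/* Same decision as the rewritten inline: a private copy is needed only
 * when the page still has an anon_vma and it no longer matches this
 * vma's anon_vma root or linear index. */
static bool might_need_to_copy(const struct page_model *page,
                               const struct anon_vma *vma_anon_vma,
                               unsigned long linear_index)
{
        return page->anon_vma &&
               (page->anon_vma->root != vma_anon_vma->root ||
                page->index != linear_index);
}

int main(void)
{
        struct anon_vma root_a = { "A" }, root_b = { "B" };
        struct page_model page = { &root_a, 5 };

        printf("matching root+index -> %d\n", might_need_to_copy(&page, &root_a, 5));
        printf("different root      -> %d\n", might_need_to_copy(&page, &root_b, 5));
        page.anon_vma = NULL;
        printf("no anon_vma         -> %d\n", might_need_to_copy(&page, &root_a, 5));
        return 0;
}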
3 changes: 0 additions & 3 deletions trunk/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
         struct page *new_page;
 
-        unlock_page(page);      /* any racers will COW it, not modify it */
-
         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
         if (new_page) {
                 copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                 add_page_to_unevictable_list(new_page);
         }
 
-        page_cache_release(page);
         return new_page;
 }

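The two deletions above move lock and reference ownership of the original page out of the helper: after this hunk, ksm_does_need_to_copy() only allocates and fills the copy, so its caller keeps the swapcache page locked and pinned for the race checks added in mm/memory.c below. A small userspace model of that contract follows; every name in it (struct buf, copy_locked_buf) is a hypothetical stand-in, and the lock/refcount fields are plain ints rather than real page flags.

#include <stdlib.h>
#include <string.h>

struct buf { char data[16]; int locked; int refs; };

/* Post-patch contract: produce the private copy but neither unlock nor
 * unpin 'orig' -- the caller still owns both and unwinds them itself. */
static struct buf *copy_locked_buf(const struct buf *orig)
{
        struct buf *copy = malloc(sizeof(*copy));

        if (copy) {
                memcpy(copy->data, orig->data, sizeof(copy->data));
                copy->locked = 1;        /* the copy comes back locked */
                copy->refs = 1;
        }
        return copy;                     /* NULL on allocation failure */
}

int main(void)
{
        struct buf orig = { "swapcache data", 1, 1 };  /* locked + pinned */
        struct buf *copy = copy_locked_buf(&orig);

        if (!copy) {
                /* Mirrors the new OOM path in do_swap_page(): the
                 * original is still locked, so the caller releases it. */
                orig.locked = 0;
                orig.refs--;
                return 1;
        }
        /* ... work on 'copy' while 'orig' stays locked ... */
        copy->locked = 0;
        free(copy);
        orig.locked = 0;                 /* caller, not helper, unlocks */
        orig.refs--;
        return 0;
}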
39 changes: 34 additions & 5 deletions trunk/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned int flags, pte_t orig_pte)
 {
         spinlock_t *ptl;
-        struct page *page;
+        struct page *page, *swapcache = NULL;
         swp_entry_t entry;
         pte_t pte;
         struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         lock_page(page);
         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-        page = ksm_might_need_to_copy(page, vma, address);
-        if (!page) {
-                ret = VM_FAULT_OOM;
-                goto out;
+        /*
+         * Make sure try_to_free_swap didn't release the swapcache
+         * from under us. The page pin isn't enough to prevent that.
+         */
+        if (unlikely(!PageSwapCache(page)))
+                goto out_page;
+
+        if (ksm_might_need_to_copy(page, vma, address)) {
+                swapcache = page;
+                page = ksm_does_need_to_copy(page, vma, address);
+
+                if (unlikely(!page)) {
+                        ret = VM_FAULT_OOM;
+                        page = swapcache;
+                        swapcache = NULL;
+                        goto out_page;
+                }
         }
 
         if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                 try_to_free_swap(page);
         unlock_page(page);
+        if (swapcache) {
+                /*
+                 * Hold the lock to avoid the swap entry to be reused
+                 * until we take the PT lock for the pte_same() check
+                 * (to avoid false positives from pte_same). For
+                 * further safety release the lock after the swap_free
+                 * so that the swap count won't change under a
+                 * parallel locked swapcache.
+                 */
+                unlock_page(swapcache);
+                page_cache_release(swapcache);
+        }
 
         if (flags & FAULT_FLAG_WRITE) {
                 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2781,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         unlock_page(page);
 out_release:
         page_cache_release(page);
+        if (swapcache) {
+                unlock_page(swapcache);
+                page_cache_release(swapcache);
+        }
         return ret;
 }

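For orientation: the ordering the comment above insists on (unlock the swapcache page only after swap_free() and the locked pte_same() check) exists to stop the swap entry from being freed and recycled while do_swap_page() still trusts its earlier pte snapshot. The sketch below is a deliberately sequential userspace caricature of that false positive; the names (struct slot, snapshot) are invented, and in the kernel the window is closed by holding the page lock, which keeps the swap count from dropping to zero, not by inspecting payloads.

#include <stdio.h>

/* One swap slot: 'entry' is the identity a pte_same()-style check
 * compares; 'payload' stands for whatever data lives behind it. */
struct slot { int entry; int payload; };

int main(void)
{
        struct slot s = { 42, 1 };      /* entry 42 currently holds payload 1 */
        int snapshot = s.entry;         /* like recording orig_pte early on */

        /* Window where the swapcache page is NOT kept locked: another
         * task frees the entry and reuses it for unrelated data. */
        s.payload = 0;                  /* entry freed */
        s.payload = 2;                  /* entry recycled: same id, new data */

        /* The identity test still passes, which is the false positive: */
        printf("entry check %s, but payload is now %d (was 1)\n",
               s.entry == snapshot ? "passes" : "fails", s.payload);

        /* Keeping the page locked across swap_free() and the pte_same()
         * check removes this window entirely. */
        return 0;
}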
