Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 356590
b: refs/heads/master
c: b79bc0a
h: refs/heads/master
v: v3
  • Loading branch information
Hugh Dickins authored and Linus Torvalds committed Feb 24, 2013
1 parent 3c612e2 commit 1261c96
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 21 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
-refs/heads/master: 4146d2d673e8d6abf9b30a5b5dd8cd95f29632eb
+refs/heads/master: b79bc0a0c79e06cc87e17530e9c1c56c6f297e17
3 changes: 1 addition & 2 deletions trunk/mm/mempolicy.c
Original file line number Diff line number Diff line change
Expand Up @@ -496,9 +496,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/*
* vm_normal_page() filters out zero pages, but there might
* still be PageReserved pages to skip, perhaps in a VDSO.
- * And we cannot move PageKsm pages sensibly or safely yet.
*/
-		if (PageReserved(page) || PageKsm(page))
+		if (PageReserved(page))
continue;
nid = page_to_nid(page);
if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
Expand Down
21 changes: 3 additions & 18 deletions trunk/mm/migrate.c
Original file line number Diff line number Diff line change
Expand Up @@ -731,20 +731,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
lock_page(page);
}

-	/*
-	 * Only memory hotplug's offline_pages() caller has locked out KSM,
-	 * and can safely migrate a KSM page. The other cases have skipped
-	 * PageKsm along with PageReserved - but it is only now when we have
-	 * the page lock that we can be certain it will not go KSM beneath us
-	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
-	 * its pagecount raised, but only here do we take the page lock which
-	 * serializes that).
-	 */
-	if (PageKsm(page) && !offlining) {
-		rc = -EBUSY;
-		goto unlock;
-	}

/* charge against new page */
mem_cgroup_prepare_migration(page, newpage, &mem);

Expand All @@ -771,7 +757,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* File Caches may use write_page() or lock_page() in migration, then,
* just care Anon page here.
*/
-	if (PageAnon(page)) {
+	if (PageAnon(page) && !PageKsm(page)) {
/*
* Only page_lock_anon_vma_read() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
Expand Down Expand Up @@ -851,7 +837,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
mem_cgroup_end_migration(mem, page, newpage,
(rc == MIGRATEPAGE_SUCCESS ||
rc == MIGRATEPAGE_BALLOON_SUCCESS));
-unlock:
unlock_page(page);
out:
return rc;
Expand Down Expand Up @@ -1155,7 +1140,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto set_status;

/* Use PageReserved to check for zero page */
-		if (PageReserved(page) || PageKsm(page))
+		if (PageReserved(page))
goto put_and_set;

pp->page = page;
Expand Down Expand Up @@ -1317,7 +1302,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,

err = -ENOENT;
/* Use PageReserved to check for zero page */
-		if (!page || PageReserved(page) || PageKsm(page))
+		if (!page || PageReserved(page))
goto set_status;

err = page_to_nid(page);
Expand Down

0 comments on commit 1261c96

Please sign in to comment.