Commit 0c58b2c

---
r: 176299
b: refs/heads/master
c: 62b61f6
h: refs/heads/master
i:
  176297: b12f414
  176295: e6da40c
v: v3
Hugh Dickins authored and Linus Torvalds committed Dec 15, 2009
1 parent 2f2e0c0 commit 0c58b2c
Showing 6 changed files with 104 additions and 38 deletions.

[refs] (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e9995ef978a7d5296fe04a9a2c5ca6e66d8bb4e5
+refs/heads/master: 62b61f611eb5e20f7e9f8619bfd03bdfe8af6348

trunk/include/linux/migrate.h (8 changes: 3 additions & 5 deletions)

@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+			unsigned long private, int offlining);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
 
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
-			struct vm_area_struct *vma, int dest) { return 0; }
+		unsigned long private, int offlining) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 
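
Worth noting why the header stub changes in lockstep with the real prototype: when CONFIG_MIGRATION is off, callers still compile against an inline stub that fails with -ENOSYS, so its signature must track the real one exactly. A minimal userspace sketch of that config-gated stub pattern (CONFIG_DEMO_FEATURE and demo_op() are invented names for illustration, not kernel API):

/*
 * Build with:    cc demo.c && ./a.out
 * Without -DCONFIG_DEMO_FEATURE the stub path is compiled in.
 */
#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE
extern int demo_op(int arg, int flag);          /* real implementation elsewhere */
#else
static inline int demo_op(int arg, int flag)
{
        (void)arg;
        (void)flag;
        return -ENOSYS;                         /* feature compiled out */
}
#endif

int main(void)
{
        /* Built without the config option, the stub reports -ENOSYS. */
        printf("demo_op: %d\n", demo_op(1, 0));
        return 0;
}
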

trunk/mm/ksm.c (84 changes: 71 additions & 13 deletions)

@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include <linux/memory.h>
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
@@ -108,14 +109,14 @@ struct ksm_scan {
 
 /**
  * struct stable_node - node of the stable rbtree
- * @page: pointer to struct page of the ksm page
  * @node: rb node of this ksm page in the stable tree
  * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
  */
 struct stable_node {
-	struct page *page;
 	struct rb_node node;
 	struct hlist_head hlist;
+	unsigned long kpfn;
 };
 
 /**
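
This is the pivotal data-structure change: the stable tree stops caching a struct page pointer and records the page frame number instead, so ksm_migrate_page() can retarget an entry when its page moves, and the hotremove code later in this file can test entries against an offlined pfn range. A toy userspace model of the pfn round trip (everything here, including the mem_map array, is an invented stand-in, not the kernel's real page array):

#include <assert.h>
#include <stdio.h>

struct page { int data; };

#define NR_PAGES 8
static struct page mem_map[NR_PAGES];

static unsigned long page_to_pfn(struct page *page)
{
        return (unsigned long)(page - mem_map);
}

static struct page *pfn_to_page(unsigned long pfn)
{
        return &mem_map[pfn];
}

int main(void)
{
        struct page *kpage = &mem_map[5];
        unsigned long kpfn = page_to_pfn(kpage);  /* what stable_node now stores */

        assert(pfn_to_page(kpfn) == kpage);       /* round trip recovers the page */
        printf("kpfn = %lu\n", kpfn);
        return 0;
}
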
@@ -515,7 +516,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node)
 	struct page *page;
 	void *expected_mapping;
 
-	page = stable_node->page;
+	page = pfn_to_page(stable_node->kpfn);
 	expected_mapping = (void *)stable_node +
 				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 	rcu_read_lock();
@@ -973,15 +974,15 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
  * This function returns the stable tree node of identical content if found,
  * NULL otherwise.
  */
-static struct stable_node *stable_tree_search(struct page *page)
+static struct page *stable_tree_search(struct page *page)
 {
 	struct rb_node *node = root_stable_tree.rb_node;
 	struct stable_node *stable_node;
 
 	stable_node = page_stable_node(page);
 	if (stable_node) {		/* ksm page forked */
 		get_page(page);
-		return stable_node;
+		return page;
 	}
 
 	while (node) {
@@ -1003,7 +1004,7 @@ static struct stable_node *stable_tree_search(struct page *page)
 			put_page(tree_page);
 			node = node->rb_right;
 		} else
-			return stable_node;
+			return tree_page;
 	}
 
 	return NULL;
@@ -1059,7 +1060,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 
 	INIT_HLIST_HEAD(&stable_node->hlist);
 
-	stable_node->page = kpage;
+	stable_node->kpfn = page_to_pfn(kpage);
 	set_page_stable_node(kpage, stable_node);
 
 	return stable_node;
@@ -1170,17 +1171,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	remove_rmap_item_from_tree(rmap_item);
 
 	/* We first start with searching the page inside the stable tree */
-	stable_node = stable_tree_search(page);
-	if (stable_node) {
-		kpage = stable_node->page;
+	kpage = stable_tree_search(page);
+	if (kpage) {
 		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
 		if (!err) {
 			/*
 			 * The page was successfully merged:
 			 * add its rmap_item to the stable tree.
 			 */
 			lock_page(kpage);
-			stable_tree_append(rmap_item, stable_node);
+			stable_tree_append(rmap_item, page_stable_node(kpage));
 			unlock_page(kpage);
 		}
 		put_page(kpage);
@@ -1715,12 +1715,63 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 
 	stable_node = page_stable_node(newpage);
 	if (stable_node) {
-		VM_BUG_ON(stable_node->page != oldpage);
-		stable_node->page = newpage;
+		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+		stable_node->kpfn = page_to_pfn(newpage);
 	}
 }
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+						 unsigned long end_pfn)
+{
+	struct rb_node *node;
+
+	for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+		struct stable_node *stable_node;
+
+		stable_node = rb_entry(node, struct stable_node, node);
+		if (stable_node->kpfn >= start_pfn &&
+		    stable_node->kpfn < end_pfn)
+			return stable_node;
+	}
+	return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+			       unsigned long action, void *arg)
+{
+	struct memory_notify *mn = arg;
+	struct stable_node *stable_node;
+
+	switch (action) {
+	case MEM_GOING_OFFLINE:
+		/*
+		 * Keep it very simple for now: just lock out ksmd and
+		 * MADV_UNMERGEABLE while any memory is going offline.
+		 */
+		mutex_lock(&ksm_thread_mutex);
+		break;
+
+	case MEM_OFFLINE:
+		/*
+		 * Most of the work is done by page migration; but there might
+		 * be a few stable_nodes left over, still pointing to struct
+		 * pages which have been offlined: prune those from the tree.
+		 */
+		while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+				mn->start_pfn + mn->nr_pages)) != NULL)
+			remove_node_from_stable_tree(stable_node);
+		/* fallthrough */
+
+	case MEM_CANCEL_OFFLINE:
+		mutex_unlock(&ksm_thread_mutex);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 #ifdef CONFIG_SYSFS
 /*
  * This all compiles without CONFIG_SYSFS, but is a waste of space.
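
Before the ksm_init() hunk below registers this callback, it may help to spell out the notifier contract: MEM_GOING_OFFLINE is raised before any pages are migrated, and exactly one of MEM_OFFLINE or MEM_CANCEL_OFFLINE follows, so a mutex taken in the first arm is always released. A minimal out-of-tree client of the same API might look like the sketch below (my_lock, my_memory_callback() and the message text are invented for illustration; this is a pattern sketch, not load-tested code):

#include <linux/memory.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static int my_memory_callback(struct notifier_block *self,
                              unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                /* Block our own worker for the duration of the offline. */
                mutex_lock(&my_lock);
                break;

        case MEM_OFFLINE:               /* offline succeeded */
        case MEM_CANCEL_OFFLINE:        /* offline aborted */
                pr_info("range %lu..%lu done\n", mn->start_pfn,
                        mn->start_pfn + mn->nr_pages);
                mutex_unlock(&my_lock);
                break;
        }
        return NOTIFY_OK;
}

static int __init my_init(void)
{
        hotplug_memory_notifier(my_memory_callback, 100);
        return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");
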
@@ -1946,6 +1997,13 @@ static int __init ksm_init(void)
 
 #endif /* CONFIG_SYSFS */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+	/*
+	 * Choose a high priority since the callback takes ksm_thread_mutex:
+	 * later callbacks could only be taking locks which nest within that.
+	 */
+	hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
 	return 0;
 
 out_free2:

trunk/mm/memory_hotplug.c (2 changes: 1 addition & 1 deletion)

@@ -698,7 +698,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 	if (list_empty(&source))
 		goto out;
 	/* this function returns # of failed pages */
-	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+	ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
 
 out:
 	return ret;

trunk/mm/mempolicy.c (19 changes: 7 additions & 12 deletions)

@@ -85,6 +85,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/migrate.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -413,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (!page)
 			continue;
 		/*
-		 * The check for PageReserved here is important to avoid
-		 * handling zero pages and other pages that may have been
-		 * marked special by the system.
-		 *
-		 * If the PageReserved would not be checked here then f.e.
-		 * the location of the zero page could have an influence
-		 * on MPOL_MF_STRICT, zero pages would be counted for
-		 * the per node stats, and there would be useless attempts
-		 * to put zero pages on the migration list.
+		 * vm_normal_page() filters out zero pages, but there might
+		 * still be PageReserved pages to skip, perhaps in a VDSO.
+		 * And we cannot move PageKsm pages sensibly or safely yet.
 		 */
-		if (PageReserved(page))
+		if (PageReserved(page) || PageKsm(page))
 			continue;
 		nid = page_to_nid(page);
 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -839,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist))
-		err = migrate_pages(&pagelist, new_node_page, dest);
+		err = migrate_pages(&pagelist, new_node_page, dest, 0);
 
 	return err;
 }
@@ -1056,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if (!list_empty(&pagelist))
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma);
+						(unsigned long)vma, 0);
 
 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;

trunk/mm/migrate.c (27 changes: 21 additions & 6 deletions)

@@ -543,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force)
+			struct page *page, int force, int offlining)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -569,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		lock_page(page);
 	}
 
+	/*
+	 * Only memory hotplug's offline_pages() caller has locked out KSM,
+	 * and can safely migrate a KSM page. The other cases have skipped
+	 * PageKsm along with PageReserved - but it is only now when we have
+	 * the page lock that we can be certain it will not go KSM beneath us
+	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+	 * its pagecount raised, but only here do we take the page lock which
+	 * serializes that).
+	 */
+	if (PageKsm(page) && !offlining) {
+		rc = -EBUSY;
+		goto unlock;
+	}
+
 	/* charge against new page */
 	charge = mem_cgroup_prepare_migration(page, &mem);
 	if (charge == -ENOMEM) {
@@ -685,7 +699,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private)
+		new_page_t get_new_page, unsigned long private, int offlining)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -705,7 +719,7 @@ int migrate_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-						page, pass > 2);
+						page, pass > 2, offlining);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -801,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 		if (!page)
 			goto set_status;
 
-		if (PageReserved(page))		/* Check for zero page */
+		/* Use PageReserved to check for zero page */
+		if (PageReserved(page) || PageKsm(page))
 			goto put_and_set;
 
 		pp->page = page;
@@ -838,7 +853,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 	err = 0;
 	if (!list_empty(&pagelist))
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm);
+				(unsigned long)pm, 0);
 
 	up_read(&mm->mmap_sem);
 	return err;
@@ -959,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 
 		err = -ENOENT;
 		/* Use PageReserved to check for zero page */
-		if (!page || PageReserved(page))
+		if (!page || PageReserved(page) || PageKsm(page))
 			goto set_status;
 
 		err = page_to_nid(page);
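
Taken together, the migrate.c changes define a simple caller contract: every migrate_pages() caller now passes an offlining flag, and unmap_and_move() fails a KSM page with -EBUSY unless that flag is set, since only the hot-remove path (do_migrate_range() above, which passes 1) holds ksm_thread_mutex. A compilable userspace model of just that gate follows; the struct and helper functions are invented stand-ins, not kernel code:

#include <errno.h>
#include <stdio.h>

struct page { int ksm; };

static int PageKsm(const struct page *page)
{
        return page->ksm;
}

static int unmap_and_move(struct page *page, int force, int offlining)
{
        (void)force;
        if (PageKsm(page) && !offlining)
                return -EBUSY;  /* ordinary migration refuses KSM pages */
        /* ... the real unmapping and copying work is elided ... */
        return 0;
}

int main(void)
{
        struct page ksm_page = { .ksm = 1 };

        printf("mempolicy-style caller: %d\n", unmap_and_move(&ksm_page, 0, 0));
        printf("offline_pages caller:   %d\n", unmap_and_move(&ksm_page, 0, 1));
        return 0;
}
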
