Commit

---
r: 115855
b: refs/heads/master
c: 64d6519
h: refs/heads/master
i:
  115853: a242959
  115851: 8e5f180
  115847: ffe96aa
  115839: 6c0a546
v: v3
Lee Schermerhorn authored and Linus Torvalds committed Oct 20, 2008
1 parent 8a7b82f · commit caddaf0
Showing 4 changed files with 34 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5344b7e648980cc2ca613ec03a56a8222ff48820
+refs/heads/master: 64d6519dda3905dfb94d3f93c07c5f263f41813f
2 changes: 2 additions & 0 deletions trunk/include/linux/swap.h
@@ -173,6 +173,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_cache_add_active_or_unevictable(struct page *,
+					struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
18 changes: 10 additions & 8 deletions trunk/mm/memory.c
@@ -1922,12 +1922,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * thread doing COW.
 		 */
 		ptep_clear_flush_notify(vma, address, page_table);
-		set_pte_at(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
 		SetPageSwapBacked(new_page);
-		lru_cache_add_active_anon(new_page);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		page_add_new_anon_rmap(new_page, vma, address);
 
+//TODO: is this safe? do_anonymous_page() does it this way.
+		set_pte_at(mm, address, page_table, entry);
+		update_mmu_cache(vma, address, entry);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2420,7 +2421,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto release;
 	inc_mm_counter(mm, anon_rss);
 	SetPageSwapBacked(page);
-	lru_cache_add_active_anon(page);
+	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2564,12 +2565,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (flags & FAULT_FLAG_WRITE)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	set_pte_at(mm, address, page_table, entry);
 	if (anon) {
-		inc_mm_counter(mm, anon_rss);
+		inc_mm_counter(mm, anon_rss);
 		SetPageSwapBacked(page);
-		lru_cache_add_active_anon(page);
-		page_add_new_anon_rmap(page, vma, address);
+		lru_cache_add_active_or_unevictable(page, vma);
+		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(page);
@@ -2578,6 +2578,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			get_page(dirty_page);
 		}
 	}
+//TODO: is this safe? do_anonymous_page() does it this way.
+	set_pte_at(mm, address, page_table, entry);
 
 	/* no need to invalidate: a not-present page won't be cached */
 	update_mmu_cache(vma, address, entry);
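Taken together, the memory.c hunks converge on a single ordering for a newly faulted anonymous page: account it, mark it swap-backed, put it on the correct LRU, add the reverse mapping, and only then make the pte visible. A condensed illustration of that shared sequence follows; the helper name fault_finish_anon is hypothetical and not in the tree:

/* Illustrative condensation of the sequence above; not code from the tree. */
static void fault_finish_anon(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, pte_t *page_table,
			      unsigned long address, pte_t entry)
{
	inc_mm_counter(mm, anon_rss);			/* account the anon page */
	SetPageSwapBacked(page);			/* anon pages are swap-backed */
	lru_cache_add_active_or_unevictable(page, vma);	/* cull unevictable pages here */
	page_add_new_anon_rmap(page, vma, address);	/* set up the reverse mapping */
	set_pte_at(mm, address, page_table, entry);	/* pte becomes visible last */
	update_mmu_cache(vma, address, entry);
}

The TODO comments flag exactly this reordering: do_anonymous_page() already set the pte after the LRU and rmap setup, and do_wp_page() and __do_fault() are brought in line with it here.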
21 changes: 21 additions & 0 deletions trunk/mm/swap.c
@@ -31,6 +31,8 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
@@ -244,6 +246,25 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page:  the page to be added to the LRU
+ * @vma:   vma in which page is mapped, for determining reclaimability
+ *
+ * Place @page on the active or unevictable LRU list, depending on
+ * page_evictable().  Note that if the page is not evictable,
+ * it goes directly back onto its zone's unevictable list.  It does
+ * NOT use a per-CPU pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+					 struct vm_area_struct *vma)
+{
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
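The new helper defers the whole culling decision to page_evictable(), which is defined elsewhere in this patch series (in mm/vmscan.c) and is not shown in this diff. A simplified sketch of the test it is assumed to perform, with a hypothetical name to make clear it is not the real definition:

/* Simplified sketch of the assumed evictability test; not code from the tree. */
static int page_evictable_sketch(struct page *page, struct vm_area_struct *vma)
{
	/* pages of an unevictable mapping (e.g. ramfs) never go on a normal LRU */
	if (mapping_unevictable(page_mapping(page)))
		return 0;
	/* pages faulted into an mlock()ed vma are culled immediately */
	if (vma && (vma->vm_flags & VM_LOCKED))
		return 0;
	return 1;
}

Under such a test, a page faulted into a VM_LOCKED vma goes straight onto its zone's unevictable list rather than riding a per-CPU pagevec through the active list, which is why the fault paths now pass the vma down to the helper.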
