Commit b2780e0

---
yaml
---
r: 272110
b: refs/heads/master
c: f80c067
h: refs/heads/master
v: v3
Minchan Kim authored and Linus Torvalds committed Nov 1, 2011
1 parent 1ece0a7 commit b2780e0
Showing 3 changed files with 21 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 39deaf8585152f1a35c1676d3d7dc6ae0fb65967
+refs/heads/master: f80c0673610e36ae29d63e3297175e22f70dde5f
2 changes: 2 additions & 0 deletions trunk/include/linux/mmzone.h
@@ -170,6 +170,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
 /* Isolate clean file */
 #define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+/* Isolate unmapped file */
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
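The new ISOLATE_UNMAPPED bit extends the existing isolate_mode_t flag set, so a caller can OR it together with the other modes. Below is a minimal user-space sketch of that composition, not part of the commit: the __force/__bitwise annotations are Sparse checker hints and are dropped here, and ISOLATE_INACTIVE is assumed to be the 0x1 flag defined earlier in the same header.

#include <stdio.h>

/* Plain unsigned stand-in for the kernel's isolate_mode_t. */
typedef unsigned int isolate_mode_t;

#define ISOLATE_INACTIVE ((isolate_mode_t)0x1) /* Isolate inactive pages (0x1 assumed from the same header) */
#define ISOLATE_ACTIVE   ((isolate_mode_t)0x2) /* Isolate active pages */
#define ISOLATE_CLEAN    ((isolate_mode_t)0x4) /* Isolate clean file */
#define ISOLATE_UNMAPPED ((isolate_mode_t)0x8) /* Isolate unmapped file */

int main(void)
{
	/* A caller that scans the active list but may not touch mapped or
	 * dirty pages combines the bits like this: */
	isolate_mode_t mode = ISOLATE_ACTIVE | ISOLATE_UNMAPPED | ISOLATE_CLEAN;

	printf("mode = 0x%x, filters mapped pages: %s\n",
	       mode, (mode & ISOLATE_UNMAPPED) ? "yes" : "no");
	return 0;
}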
20 changes: 18 additions & 2 deletions trunk/mm/vmscan.c
@@ -1048,6 +1048,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 	if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
 		return ret;
 
+	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+		return ret;
+
 	if (likely(get_page_unless_zero(page))) {
 		/*
 		 * Be careful not to clear PageLRU until after we're
@@ -1471,6 +1474,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		reclaim_mode |= ISOLATE_ACTIVE;
 
 	lru_add_drain();
+
+	if (!sc->may_unmap)
+		reclaim_mode |= ISOLATE_UNMAPPED;
+	if (!sc->may_writepage)
+		reclaim_mode |= ISOLATE_CLEAN;
+
 	spin_lock_irq(&zone->lru_lock);
 
 	if (scanning_global_lru(sc)) {
@@ -1588,19 +1597,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 	unsigned long nr_rotated = 0;
+	isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
 
 	lru_add_drain();
+
+	if (!sc->may_unmap)
+		reclaim_mode |= ISOLATE_UNMAPPED;
+	if (!sc->may_writepage)
+		reclaim_mode |= ISOLATE_CLEAN;
+
 	spin_lock_irq(&zone->lru_lock);
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_pages, &l_hold,
 						&pgscanned, sc->order,
-						ISOLATE_ACTIVE, zone,
+						reclaim_mode, zone,
 						1, file);
 		zone->pages_scanned += pgscanned;
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
 						&pgscanned, sc->order,
-						ISOLATE_ACTIVE, zone,
+						reclaim_mode, zone,
 						sc->mem_cgroup, 1, file);
 		/*
 		 * mem_cgroup_isolate_pages() keeps track of
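Taken together, the vmscan.c hunks let shrink_inactive_list() and shrink_active_list() encode the scan constraints (sc->may_unmap, sc->may_writepage) as isolation flags that __isolate_lru_page() then enforces per page. The following self-contained sketch mirrors that filter pattern outside the kernel; fake_page and filter_page are hypothetical stand-ins for struct page and __isolate_lru_page(), and the return convention is simplified.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int isolate_mode_t;
#define ISOLATE_CLEAN    ((isolate_mode_t)0x4)
#define ISOLATE_UNMAPPED ((isolate_mode_t)0x8)

/* Hypothetical stand-in for the bits of struct page state checked here. */
struct fake_page {
	bool dirty;
	bool writeback;
	bool mapped;
};

/* Mirrors the checks in __isolate_lru_page(): skip the page when the caller
 * asked for clean pages only, or for unmapped pages only, and the page does
 * not qualify.  Returns 0 for "isolate it", -1 for "skip it". */
static int filter_page(const struct fake_page *page, isolate_mode_t mode)
{
	if ((mode & ISOLATE_CLEAN) && (page->dirty || page->writeback))
		return -1;
	if ((mode & ISOLATE_UNMAPPED) && page->mapped)
		return -1;
	return 0;
}

int main(void)
{
	/* A reclaim pass that must not unmap pages and must not start
	 * writeback, expressed the way the shrink_*_list() callers do it: */
	bool may_unmap = false, may_writepage = false;
	isolate_mode_t mode = 0;

	if (!may_unmap)
		mode |= ISOLATE_UNMAPPED;
	if (!may_writepage)
		mode |= ISOLATE_CLEAN;

	struct fake_page mapped_page = { .dirty = false, .writeback = false, .mapped = true };
	printf("mapped page isolated? %s\n",
	       filter_page(&mapped_page, mode) == 0 ? "yes" : "no");
	return 0;
}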
