Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 16840
b: refs/heads/master
c: 8419c31
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed Jan 9, 2006
1 parent 3253ff3 commit 0ea874b
Show file tree
Hide file tree
Showing 3 changed files with 78 additions and 79 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 39743889aaf76725152f16aa90ca3c45f6d52da3
refs/heads/master: 8419c3181086c86664e8246bc997afc2e4ffba4f
3 changes: 1 addition & 2 deletions trunk/include/linux/swap.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,10 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
extern int shrink_all_memory(int);
extern int vm_swappiness;

#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l);

#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t);
#endif

Expand Down
152 changes: 76 additions & 76 deletions trunk/mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -569,6 +569,40 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
}

#ifdef CONFIG_MIGRATION
/*
 * Put a previously isolated page back onto the appropriate LRU list
 * and drop the reference that isolation took.
 */
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (!PageActive(page)) {
		lru_cache_add(page);
	} else {
		/*
		 * lru_cache_add_active() requires PG_active to be
		 * clear on entry, so drop the bit before re-adding.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	}
	put_page(page);
}

/*
 * Return every isolated page on @l to the appropriate LRU list.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page, *next;
	int nr_moved = 0;

	/* _safe variant: move_to_lru() unlinks each entry as we walk. */
	list_for_each_entry_safe(page, next, l, lru) {
		move_to_lru(page);
		nr_moved++;
	}
	return nr_moved;
}

/*
* swapout a single page
* page is locked upon entry, unlocked on exit
Expand Down Expand Up @@ -709,6 +743,48 @@ int migrate_pages(struct list_head *l, struct list_head *t)

return nr_failed + retry;
}

/*
 * Callback run on each CPU via schedule_on_each_cpu() (see
 * isolate_lru_page()): drain this CPU's pending LRU additions so a
 * page cached in a per-cpu pagevec reaches the LRU lists.
 * @dummy is unused; the signature is dictated by the caller.
 */
static void lru_add_drain_per_cpu(void *dummy)
{
lru_add_drain();
}

/*
* Isolate one page from the LRU lists and put it on the
* indicated list. Do necessary cache draining if the
* page is not on the LRU lists yet.
*
* Result:
* 0 = page not on LRU list
* 1 = page removed from LRU list and added to the specified list.
* -ENOENT = page is being freed elsewhere.
*/
int isolate_lru_page(struct page *page)
{
int rc = 0;
struct zone *zone = page_zone(page);

redo:
/* lru_lock serializes LRU list membership changes for this zone. */
spin_lock_irq(&zone->lru_lock);
rc = __isolate_lru_page(page);
if (rc == 1) {
/* Page was on an LRU; unlink it from whichever list held it. */
if (PageActive(page))
del_page_from_active_list(zone, page);
else
del_page_from_inactive_list(zone, page);
}
spin_unlock_irq(&zone->lru_lock);
if (rc == 0) {
/*
* Maybe this page is still waiting for a cpu to drain it
* from one of the lru lists?
*/
rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
/* Draining succeeded and the page now shows as LRU: retry. */
if (rc == 0 && PageLRU(page))
goto redo;
}
return rc;
}
#endif

/*
Expand Down Expand Up @@ -758,48 +834,6 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
return nr_taken;
}

/*
 * Callback run on each CPU via schedule_on_each_cpu() (see
 * isolate_lru_page()): drain this CPU's pending LRU additions so a
 * page cached in a per-cpu pagevec reaches the LRU lists.
 * @dummy is unused; the signature is dictated by the caller.
 */
static void lru_add_drain_per_cpu(void *dummy)
{
lru_add_drain();
}

/*
* Isolate one page from the LRU lists and put it on the
* indicated list. Do necessary cache draining if the
* page is not on the LRU lists yet.
*
* Result:
* 0 = page not on LRU list
* 1 = page removed from LRU list and added to the specified list.
* -ENOENT = page is being freed elsewhere.
*/
int isolate_lru_page(struct page *page)
{
int rc = 0;
struct zone *zone = page_zone(page);

redo:
/* lru_lock serializes LRU list membership changes for this zone. */
spin_lock_irq(&zone->lru_lock);
rc = __isolate_lru_page(page);
if (rc == 1) {
/* Page was on an LRU; unlink it from whichever list held it. */
if (PageActive(page))
del_page_from_active_list(zone, page);
else
del_page_from_inactive_list(zone, page);
}
spin_unlock_irq(&zone->lru_lock);
if (rc == 0) {
/*
* Maybe this page is still waiting for a cpu to drain it
* from one of the lru lists?
*/
rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
/* Draining succeeded and the page now shows as LRU: retry. */
if (rc == 0 && PageLRU(page))
goto redo;
}
return rc;
}

/*
* shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
*/
Expand Down Expand Up @@ -865,40 +899,6 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
pagevec_release(&pvec);
}

/*
 * Put a previously isolated page back onto the appropriate LRU list
 * and drop the reference that isolation took.
 */
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (!PageActive(page)) {
		lru_cache_add(page);
	} else {
		/*
		 * lru_cache_add_active() requires PG_active to be
		 * clear on entry, so drop the bit before re-adding.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	}
	put_page(page);
}

/*
 * Return every isolated page on @l to the appropriate LRU list.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page, *next;
	int nr_moved = 0;

	/* _safe variant: move_to_lru() unlinks each entry as we walk. */
	list_for_each_entry_safe(page, next, l, lru) {
		move_to_lru(page);
		nr_moved++;
	}
	return nr_moved;
}

/*
* This moves pages from the active list to the inactive list.
*
Expand Down

0 comments on commit 0ea874b

Please sign in to comment.