
Commit

---
r: 365447
b: refs/heads/master
c: 5bc7b8a
h: refs/heads/master
i:
  365445: 1901803
  365443: 663eecd
  365439: 1317529
v: v3
Shaohua Li authored and Linus Torvalds committed Apr 29, 2013
1 parent 71e1d10 commit 2796795
Showing 7 changed files with 40 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 1eec6702a80e04416d528846a5ff2122484d95ec
refs/heads/master: 5bc7b8aca942d03bf2716ddcfcb4e0b57e43a1b8
11 changes: 10 additions & 1 deletion trunk/include/linux/huge_mm.h
@@ -99,7 +99,11 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
extern int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
return split_huge_page_to_list(page, NULL);
}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd);
#define split_huge_page_pmd(__vma, __address, __pmd) \
@@ -186,6 +190,11 @@ extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vm
#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
return 0;
}
static inline int split_huge_page(struct page *page)
{
return 0;
6 changes: 3 additions & 3 deletions trunk/include/linux/swap.h
@@ -236,7 +236,7 @@ extern unsigned long nr_free_pagecache_pages(void);
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec);
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -346,7 +346,7 @@ extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
@@ -465,7 +465,7 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}

static inline int add_to_swap(struct page *page)
static inline int add_to_swap(struct page *page, struct list_head *list)
{
return 0;
}
21 changes: 15 additions & 6 deletions trunk/mm/huge_memory.c
@@ -1559,7 +1559,8 @@ static int __split_huge_page_splitting(struct page *page,
return ret;
}

static void __split_huge_page_refcount(struct page *page)
static void __split_huge_page_refcount(struct page *page,
struct list_head *list)
{
int i;
struct zone *zone = page_zone(page);
@@ -1645,7 +1646,7 @@ static void __split_huge_page_refcount(struct page *page)
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));

lru_add_page_tail(page, page_tail, lruvec);
lru_add_page_tail(page, page_tail, lruvec, list);
}
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
@@ -1752,7 +1753,8 @@ static int __split_huge_page_map(struct page *page,

/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
struct anon_vma *anon_vma)
struct anon_vma *anon_vma,
struct list_head *list)
{
int mapcount, mapcount2;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1783,7 +1785,7 @@ static void __split_huge_page(struct page *page,
mapcount, page_mapcount(page));
BUG_ON(mapcount != page_mapcount(page));

__split_huge_page_refcount(page);
__split_huge_page_refcount(page, list);

mapcount2 = 0;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
@@ -1798,7 +1800,14 @@ static void __split_huge_page(struct page *page,
BUG_ON(mapcount != mapcount2);
}

int split_huge_page(struct page *page)
/*
* Split a hugepage into normal pages. This doesn't change the position of head
* page. If @list is null, tail pages will be added to LRU list, otherwise, to
* @list. Both head page and tail pages will inherit mapping, flags, and so on
* from the hugepage.
* Return 0 if the hugepage is split successfully otherwise return 1.
*/
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct anon_vma *anon_vma;
int ret = 1;
@@ -1823,7 +1832,7 @@ int split_huge_page(struct page *page)
goto out_unlock;

BUG_ON(!PageSwapBacked(page));
__split_huge_page(page, anon_vma);
__split_huge_page(page, anon_vma, list);
count_vm_event(THP_SPLIT);

BUG_ON(PageCompound(page));
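The comment added above documents the new entry point. As a rough illustration of how a caller is meant to use it, here is a minimal sketch (not part of this commit; the wrapper function and its error code are hypothetical, only split_huge_page_to_list() itself comes from this change): the caller hands in a private list, and on success the tail pages end up on that list instead of the LRU, each holding the extra reference taken in lru_add_page_tail().

/* Hypothetical caller sketch: split a THP and collect its tail pages.
 * The caller is assumed to hold the page lock, as split_huge_page()
 * already required. */
static int split_thp_to_private_list(struct page *page, struct list_head *pages)
{
        if (!PageTransHuge(page))
                return 0;
        /* Returns 0 on success; on failure the hugepage is left intact. */
        if (split_huge_page_to_list(page, pages))
                return -EBUSY;
        /* The head page keeps its place; tail pages are now on @pages. */
        return 0;
}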
11 changes: 8 additions & 3 deletions trunk/mm/swap.c
@@ -737,7 +737,7 @@ EXPORT_SYMBOL(__pagevec_release);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec)
struct lruvec *lruvec, struct list_head *list)
{
int uninitialized_var(active);
enum lru_list lru;
@@ -749,7 +749,8 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
VM_BUG_ON(NR_CPUS != 1 &&
!spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

SetPageLRU(page_tail);
if (!list)
SetPageLRU(page_tail);

if (page_evictable(page_tail)) {
if (PageActive(page)) {
@@ -767,7 +768,11 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,

if (likely(PageLRU(page)))
list_add_tail(&page_tail->lru, &page->lru);
else {
else if (list) {
/* page reclaim is reclaiming a huge page */
get_page(page_tail);
list_add_tail(&page_tail->lru, list);
} else {
struct list_head *list_head;
/*
* Head page has not yet been counted, as an hpage,
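To spell out the contract the new list parameter creates in lru_add_page_tail(): when a list is supplied, the tail page is not marked PageLRU and is queued on the caller's list with an extra reference from get_page(), so the caller owns it until it disposes of it. A hypothetical consumer, only for illustration (the helper name and the use of put_page() as the final disposition are assumptions, not part of the commit):

/* Hypothetical: drain tail pages queued by lru_add_page_tail(..., list). */
static void drain_tail_pages(struct list_head *list)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru) {
                list_del(&page->lru);
                /* ... reclaim or otherwise handle the tail page ... */
                put_page(page);  /* drop the reference taken by get_page() */
        }
}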
4 changes: 2 additions & 2 deletions trunk/mm/swap_state.c
@@ -160,7 +160,7 @@ void __delete_from_swap_cache(struct page *page)
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
int add_to_swap(struct page *page)
int add_to_swap(struct page *page, struct list_head *list)
{
swp_entry_t entry;
int err;
@@ -173,7 +173,7 @@ int add_to_swap(struct page *page)
return 0;

if (unlikely(PageTransHuge(page)))
if (unlikely(split_huge_page(page))) {
if (unlikely(split_huge_page_to_list(page, list))) {
swapcache_free(entry, NULL);
return 0;
}
2 changes: 1 addition & 1 deletion trunk/mm/vmscan.c
@@ -781,7 +781,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageAnon(page) && !PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
if (!add_to_swap(page))
if (!add_to_swap(page, page_list))
goto activate_locked;
may_enter_fs = 1;
}
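Taken together, the reclaim path now threads its own page list through the split: shrink_page_list() passes page_list into add_to_swap(), which forwards it to split_huge_page_to_list(), so the tail pages of a split THP are appended to the very list being reclaimed instead of being pushed back onto the LRU. A simplified sketch of that loop, with everything except the THP handling elided (the function name is made up; add_to_swap() and lru_to_page() are the existing kernel interfaces):

/* Simplified sketch of the shrink_page_list() flow after this change. */
static void reclaim_list_sketch(struct list_head *page_list)
{
        while (!list_empty(page_list)) {
                struct page *page = lru_to_page(page_list);

                list_del(&page->lru);
                if (PageAnon(page) && !PageSwapCache(page)) {
                        /*
                         * add_to_swap() may split a THP here; its tail pages
                         * are appended to @page_list and reclaimed by later
                         * iterations of this same loop.
                         */
                        if (!add_to_swap(page, page_list))
                                continue;  /* the real code activates the page */
                }
                /* ... try_to_unmap(), pageout(), free ... */
        }
}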
