
Commit

---
yaml
---
r: 83048
b: refs/heads/master
c: 46017e9
h: refs/heads/master
v: v3
Hugh Dickins authored and Linus Torvalds committed Feb 5, 2008
1 parent be435b4 commit d5b9c77
Showing 5 changed files with 58 additions and 61 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: c4cc6d07b2f465fbf5efd99bbe772a49c515f3f2
refs/heads/master: 46017e954826ac59e91df76341a3f76b45467847
19 changes: 7 additions & 12 deletions trunk/include/linux/swap.h
@@ -158,9 +158,6 @@ struct swap_list_t {
/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)

/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
@@ -230,9 +227,12 @@ extern int move_from_swap_cache(struct page *, unsigned long,
struct address_space *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
unsigned long addr);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t,
struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t,
struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
@@ -306,7 +306,7 @@ static inline void swap_free(swp_entry_t swp)
{
}

static inline struct page *read_swap_cache_async(swp_entry_t swp,
static inline struct page *swapin_readahead(swp_entry_t swp,
struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
@@ -317,11 +317,6 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}

static inline int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
return 0;
}

#define can_share_swap_page(p) (page_mapcount(p) == 1)

static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
45 changes: 1 addition & 44 deletions trunk/mm/memory.c
@@ -1980,48 +1980,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
return 0;
}

/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @addr: address to start
* @vma: user vma this addresses belong to
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
* because it doesn't cost us any seek time. We also make sure to queue
* the 'original' request together with the readahead ones...
*
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
* Caller must hold down_read on the vma->vm_mm if vma is not NULL.
*/
void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
{
int nr_pages;
struct page *page;
unsigned long offset;
unsigned long end_offset;

/*
* Get starting offset for readaround, and number of pages to read.
* Adjust starting address by readbehind (for NUMA interleave case)?
* No, it's very unlikely that swap layout would follow vma layout,
* more likely that neighbouring swap pages came from the same node:
* so use the same "addr" to choose the same node for each swap read.
*/
nr_pages = valid_swaphandles(entry, &offset);
for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
/* Ok, do the async read-ahead now */
page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
vma, addr);
if (!page)
break;
page_cache_release(page);
}
lru_add_drain(); /* Push any new pages onto the LRU now */
}

/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -2049,8 +2007,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = lookup_swap_cache(entry);
if (!page) {
grab_swap_token(); /* Contend for token _before_ read-in */
swapin_readahead(entry, address, vma);
page = read_swap_cache_async(entry, vma, address);
page = swapin_readahead(entry, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
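The caller-side effect of the hunk above is that do_swap_page() no longer issues a separate void readahead call followed by its own read_swap_cache_async(); the new swapin_readahead() queues the surrounding cluster and hands back the target page itself. A minimal user-space sketch of the two calling conventions, using stub types in place of the kernel's swp_entry_t, struct page and struct vm_area_struct (illustration only, not kernel code):

#include <stdio.h>

/* Stub types standing in for the kernel's -- illustration only. */
typedef struct { unsigned long val; } swp_entry_t;
struct page { unsigned long swap_offset; };
struct vm_area_struct { int unused; };

static struct page the_page;

/* Stand-in for read_swap_cache_async(): "reads in" the target entry. */
static struct page *read_swap_cache_async(swp_entry_t entry,
					  struct vm_area_struct *vma,
					  unsigned long addr)
{
	(void)vma;
	(void)addr;
	the_page.swap_offset = entry.val;
	return &the_page;
}

/* New-style swapin_readahead(): queue readahead, then return the page. */
static struct page *swapin_readahead(swp_entry_t entry,
				     struct vm_area_struct *vma,
				     unsigned long addr)
{
	/* ...readahead of the neighbouring cluster would be queued here... */
	return read_swap_cache_async(entry, vma, addr);
}

int main(void)
{
	swp_entry_t entry = { 42 };
	struct page *page;

	/* Old convention (two steps):
	 *	swapin_readahead(entry, addr, vma);		returned void
	 *	page = read_swap_cache_async(entry, vma, addr);
	 * New convention (one step):
	 */
	page = swapin_readahead(entry, NULL, 0);
	printf("faulted in swap offset %lu\n", page->swap_offset);
	return 0;
}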
6 changes: 2 additions & 4 deletions trunk/mm/shmem.c
@@ -1036,8 +1036,7 @@ static struct page *shmem_swapin(struct shmem_inode_info *info,
pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
swapin_readahead(entry, 0, &pvma);
page = read_swap_cache_async(entry, &pvma, 0);
page = swapin_readahead(entry, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
@@ -1067,8 +1066,7 @@ static inline int shmem_parse_mpol(char *value, int *policy,
static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
swapin_readahead(entry, 0, NULL);
return read_swap_cache_async(entry, NULL, 0);
return swapin_readahead(entry, NULL, 0);
}

static inline struct page *
47 changes: 47 additions & 0 deletions trunk/mm/swap_state.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
@@ -368,3 +369,49 @@ struct page *read_swap_cache_async(swp_entry_t entry,
page_cache_release(new_page);
return found_page;
}

/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @vma: user vma this address belongs to
* @addr: target address for mempolicy
*
* Returns the struct page for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
* because it doesn't cost us any seek time. We also make sure to queue
* the 'original' request together with the readahead ones...
*
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
* Caller must hold down_read on the vma->vm_mm if vma is not NULL.
*/
struct page *swapin_readahead(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr)
{
int nr_pages;
struct page *page;
unsigned long offset;
unsigned long end_offset;

/*
* Get starting offset for readaround, and number of pages to read.
* Adjust starting address by readbehind (for NUMA interleave case)?
* No, it's very unlikely that swap layout would follow vma layout,
* more likely that neighbouring swap pages came from the same node:
* so use the same "addr" to choose the same node for each swap read.
*/
nr_pages = valid_swaphandles(entry, &offset);
for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
/* Ok, do the async read-ahead now */
page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
vma, addr);
if (!page)
break;
page_cache_release(page);
}
lru_add_drain(); /* Push any new pages onto the LRU now */
return read_swap_cache_async(entry, vma, addr);
}
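The "aligned block of (1 << page_cluster) entries" referred to in the comment above is computed by valid_swaphandles() in mm/swapfile.c, which is not part of this diff: the target offset is rounded down to a cluster boundary so the whole group can be read with one seek. A stand-alone sketch of that alignment arithmetic, assuming the default page_cluster value of 3 and ignoring the clipping against the swap map that the real function performs:

#include <stdio.h>

#define PAGE_CLUSTER 3			/* assumed default: 8-page clusters */

/*
 * Illustration only, not the kernel's valid_swaphandles(): derive the
 * readaround window that contains 'target', aligned to the cluster size.
 */
static unsigned long readaround_window(unsigned long target,
					unsigned long *start)
{
	unsigned long nr = 1UL << PAGE_CLUSTER;

	*start = target & ~(nr - 1);	/* round down to cluster boundary */
	return nr;			/* number of entries to read */
}

int main(void)
{
	unsigned long start;
	unsigned long nr = readaround_window(1234, &start);

	/* Prints: read 8 entries starting at swap offset 1232 */
	printf("read %lu entries starting at swap offset %lu\n", nr, start);
	return 0;
}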
