---
yaml
---
r: 2469
b: refs/heads/master
c: c475a8a
h: refs/heads/master
i:
  2467: d63e9d4
v: v3
Hugh Dickins authored and Linus Torvalds committed Jun 22, 2005
1 parent bf3f9e3 commit ff8efd9
Showing 4 changed files with 22 additions and 66 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d296e9cd02c92e576ecce5344026a4df4353cdb2
+refs/heads/master: c475a8ab625d567eacf5e30ec35d6d8704558062
10 changes: 5 additions & 5 deletions trunk/mm/memory.c
@@ -1686,23 +1686,23 @@ static int do_swap_page(struct mm_struct * mm,
 	}
 
 	/* The page isn't present yet, go ahead with the fault. */
 
-	swap_free(entry);
-	if (vm_swap_full())
-		remove_exclusive_swap_page(page);
-
 	inc_mm_counter(mm, rss);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if (write_access && can_share_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		write_access = 0;
 	}
-	unlock_page(page);
 
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
 	page_add_anon_rmap(page, vma, address);
 
+	swap_free(entry);
+	if (vm_swap_full())
+		remove_exclusive_swap_page(page);
+	unlock_page(page);
+
 	if (write_access) {
 		if (do_wp_page(mm, vma, address,
 				page_table, pmd, pte) == VM_FAULT_OOM)
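
Read alongside the swapfile.c change below, this hunk keeps the page locked and the swap reference unfreed until after the pte is set and the anon rmap taken, which is what can_share_swap_page's new mapcount/swapcount accounting relies on. A condensed sketch of the resulting order; the annotations are our reading of the diff, not text from the commit:

	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && can_share_swap_page(page)) {
		/* Page still locked, swap entry not yet freed: mapcount is 0
		 * (our pte is not set yet) and page_swapcount() still sees our
		 * swap reference, so an exclusive page counts as exactly 1. */
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		write_access = 0;
	}
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);	/* mapcount: 0 -> 1 */

	swap_free(entry);			/* only now drop the swap reference */
	if (vm_swap_full())
		remove_exclusive_swap_page(page);
	unlock_page(page);			/* and only now drop the page lock */
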
21 changes: 0 additions & 21 deletions trunk/mm/rmap.c
@@ -539,27 +539,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 		goto out_unmap;
 	}
 
-	/*
-	 * Don't pull an anonymous page out from under get_user_pages.
-	 * GUP carefully breaks COW and raises page count (while holding
-	 * page_table_lock, as we have here) to make sure that the page
-	 * cannot be freed. If we unmap that page here, a user write
-	 * access to the virtual address will bring back the page, but
-	 * its raised count will (ironically) be taken to mean it's not
-	 * an exclusive swap page, do_wp_page will replace it by a copy
-	 * page, and the user never get to see the data GUP was holding
-	 * the original page for.
-	 *
-	 * This test is also useful for when swapoff (unuse_process) has
-	 * to drop page lock: its reference to the page stops existing
-	 * ptes from being unmapped, so swapoff can make progress.
-	 */
-	if (PageSwapCache(page) &&
-	    page_count(page) != page_mapcount(page) + 2) {
-		ret = SWAP_FAIL;
-		goto out_unmap;
-	}
-
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
 	pteval = ptep_clear_flush(vma, address, pte);
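
The guard deleted here existed because the old exclusive_swap_page judged exclusivity by page_count, which a get_user_pages pin inflates, producing the bogus COW the removed comment describes. With exclusivity now computed from page_mapcount plus page_swapcount (see swapfile.c below), a raised page_count no longer enters the decision, which is presumably why try_to_unmap_one can stop refusing such pages. A toy userspace model of that distinction, under our deliberately simplified bookkeeping; none of these names are kernel API:

	#include <assert.h>
	#include <stdbool.h>

	struct page_model {
		int refcount;	/* page_count(): every reference, incl. GUP pins */
		int mapcount;	/* page_mapcount(): ptes mapping the page */
		int swapcount;	/* swap references, minus the swap cache's own */
	};

	/* Old heuristic (exclusive_swap_page): exactly the swap cache
	 * reference plus the faulting path's reference, nothing else. */
	static bool old_exclusive(const struct page_model *p)
	{
		return p->refcount == 2;
	}

	/* New rule (can_share_swap_page, simplified): count real users. */
	static bool new_exclusive(const struct page_model *p)
	{
		return p->mapcount + p->swapcount == 1;
	}

	int main(void)
	{
		/* Swap-cache page with one real user, but GUP holds an extra
		 * reference: refcount 3 (cache + user pte + pin). */
		struct page_model pinned = {
			.refcount = 3, .mapcount = 1, .swapcount = 0,
		};
		assert(!old_exclusive(&pinned));	/* old: looks shared, bogus COW */
		assert(new_exclusive(&pinned));		/* new: one user, page reused */
		return 0;
	}
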
55 changes: 16 additions & 39 deletions trunk/mm/swapfile.c
@@ -276,61 +276,37 @@ void swap_free(swp_entry_t entry)
 }
 
 /*
- * Check if we're the only user of a swap page,
- * when the page is locked.
+ * How many references to page are currently swapped out?
  */
-static int exclusive_swap_page(struct page *page)
+static inline int page_swapcount(struct page *page)
 {
-	int retval = 0;
-	struct swap_info_struct * p;
+	int count = 0;
+	struct swap_info_struct *p;
 	swp_entry_t entry;
 
 	entry.val = page->private;
 	p = swap_info_get(entry);
 	if (p) {
-		/* Is the only swap cache user the cache itself? */
-		if (p->swap_map[swp_offset(entry)] == 1) {
-			/* Recheck the page count with the swapcache lock held.. */
-			write_lock_irq(&swapper_space.tree_lock);
-			if (page_count(page) == 2)
-				retval = 1;
-			write_unlock_irq(&swapper_space.tree_lock);
-		}
+		/* Subtract the 1 for the swap cache itself */
+		count = p->swap_map[swp_offset(entry)] - 1;
 		swap_info_put(p);
 	}
-	return retval;
+	return count;
 }
 
 /*
  * We can use this swap cache entry directly
  * if there are no other references to it.
- *
- * Here "exclusive_swap_page()" does the real
- * work, but we opportunistically check whether
- * we need to get all the locks first..
  */
 int can_share_swap_page(struct page *page)
 {
-	int retval = 0;
+	int count;
 
-	if (!PageLocked(page))
-		BUG();
-	switch (page_count(page)) {
-	case 3:
-		if (!PagePrivate(page))
-			break;
-		/* Fallthrough */
-	case 2:
-		if (!PageSwapCache(page))
-			break;
-		retval = exclusive_swap_page(page);
-		break;
-	case 1:
-		if (PageReserved(page))
-			break;
-		retval = 1;
-	}
-	return retval;
+	BUG_ON(!PageLocked(page));
+	count = page_mapcount(page);
+	if (count <= 1 && PageSwapCache(page))
+		count += page_swapcount(page);
+	return count == 1;
 }
 
 /*
@@ -529,9 +505,10 @@ static int unuse_mm(struct mm_struct *mm,
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		/*
-		 * Our reference to the page stops try_to_unmap_one from
-		 * unmapping its ptes, so swapoff can make progress.
+		 * Activate page so shrink_cache is unlikely to unmap its
+		 * ptes while lock is dropped, so swapoff can make progress.
 		 */
+		activate_page(page);
 		unlock_page(page);
 		down_read(&mm->mmap_sem);
 		lock_page(page);
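
A worked example of the new arithmetic at swap-in time (the do_swap_page hunk above, before swap_free runs): swap_map for the entry holds 2, the swap cache's own reference plus our still-present swap pte, so page_swapcount returns 1; mapcount is 0 until set_pte_at, so can_share_swap_page sees exactly one user and grants write access. A minimal model under that simplified bookkeeping; the model_ names are ours:

	#include <assert.h>
	#include <stdbool.h>

	/* swap_map[offset] counts the swap cache's reference plus every
	 * swap pte still pointing at the entry (our simplified model). */
	static int model_page_swapcount(int swap_map_entry)
	{
		return swap_map_entry - 1;	/* subtract the cache's own ref */
	}

	static bool model_can_share(int mapcount, int swap_map_entry)
	{
		int count = mapcount;
		if (count <= 1)			/* page assumed in swap cache */
			count += model_page_swapcount(swap_map_entry);
		return count == 1;
	}

	int main(void)
	{
		/* Swap-in fault, before swap_free(): pte not yet set, so
		 * mapcount 0; swap_map 2 = cache ref + our swap pte. */
		assert(model_can_share(0, 2));

		/* Entry also referenced from a forked sibling's swap pte:
		 * swap_map 3, so two swapped-out users -> must COW. */
		assert(!model_can_share(0, 3));
		return 0;
	}
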
