From d32e10d743c6b89a25f4d3906169bdd9afb710c2 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Mon, 23 Jun 2008 14:30:30 +0200
Subject: [PATCH]

--- yaml ---
r: 98385
b: refs/heads/master
c: 945754a1754f9d4c2974a8241ad4f92fad7f3a6a
h: refs/heads/master
i:
  98383: 826bc78094758b0abafc93ca2a5e04ea970de98c
v: v3
---
 [refs]            |  2 +-
 trunk/mm/memory.c | 27 ++++++++++++++++++++++++++-
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 171a8ca76f96..e2377c237889 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 672ca28e300c17bf8d792a2a7a8631193e580c74
+refs/heads/master: 945754a1754f9d4c2974a8241ad4f92fad7f3a6a
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index 423e0e7c2f73..d14b251a25a6 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -1785,7 +1785,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
-			page_remove_rmap(old_page, vma);
 			if (!PageAnon(old_page)) {
 				dec_mm_counter(mm, file_rss);
 				inc_mm_counter(mm, anon_rss);
@@ -1807,6 +1806,32 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
+		if (old_page) {
+			/*
+			 * Only after switching the pte to the new page may
+			 * we remove the mapcount here. Otherwise another
+			 * process may come and find the rmap count decremented
+			 * before the pte is switched to the new page, and
+			 * "reuse" the old page, writing into it while our pte
+			 * here still points into it and it can be read by other
+			 * threads.
+			 *
+			 * The critical issue is to order this
+			 * page_remove_rmap with the ptep_clear_flush above.
+			 * Those stores are ordered by (if nothing else,)
+			 * the barrier present in the atomic_add_negative
+			 * in page_remove_rmap.
+			 *
+			 * Then the TLB flush in ptep_clear_flush ensures that
+			 * no process can access the old page before the
+			 * decremented mapcount is visible. And the old page
+			 * cannot be reused until after the decremented
+			 * mapcount is visible. So transitively, TLBs to
+			 * old page will be flushed before it can be reused.
+			 */
+			page_remove_rmap(old_page, vma);
+		}
+
 		/* Free the old page.. */
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
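
The ordering argument in the new comment can be restated outside the kernel with C11 atomics. The user-space sketch below is illustrative only: fake_pte, mapcount, cow_break() and try_reuse() are hypothetical stand-ins for the pte, the old page's rmap count, the pte switch in do_wp_page(), and a would-be page reuser; the seq_cst atomic_fetch_sub() plays the role of the full barrier implied by atomic_add_negative() inside page_remove_rmap(). It builds with "gcc -pthread sketch.c".

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int old_page = 1, new_page = 2;

/* Stand-in for the pte: which physical page the mapping points at. */
static _Atomic(int *) fake_pte = &old_page;

/* Stand-in for old_page's rmap count: one mapping to begin with. */
static atomic_int mapcount = 1;

/* COW side: switch the pte FIRST, only then drop the mapcount. */
static void *cow_break(void *arg)
{
	(void)arg;

	/* 1. Point the pte at the new page (the set_pte_at() that
	 *    follows ptep_clear_flush() in do_wp_page()). */
	atomic_store(&fake_pte, &new_page);

	/* 2. Drop the mapcount (page_remove_rmap()). The seq_cst RMW
	 *    makes the pte store above visible before the decremented
	 *    count can be observed, just as atomic_add_negative()'s
	 *    implied full barrier does in the kernel. */
	atomic_fetch_sub(&mapcount, 1);
	return NULL;
}

/* Reuse side: old_page may only be reused once no mapping remains. */
static void *try_reuse(void *arg)
{
	(void)arg;

	if (atomic_load(&mapcount) == 0) {
		/* Whoever sees the dropped count can no longer reach
		 * old_page through the pte, so writing to it is safe. */
		assert(atomic_load(&fake_pte) != &old_page);
		printf("old_page unmapped, safe to reuse\n");
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cow_break, NULL);
	pthread_create(&b, NULL, try_reuse, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With the pre-patch ordering (decrement first, pte switch second), try_reuse() could observe mapcount == 0 while fake_pte still pointed at old_page, so the assert would be allowed to fire: that is the copy-on-write race this patch closes by moving page_remove_rmap() after the pte switch.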