[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock.  Remove the temporary
bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not
to hold page_table_lock, whether it's on init_mm or a user mm; take
page_table_lock internally to check if a racing task already allocated.
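
(The conversion of __pud_alloc, __pmd_alloc and __pte_alloc themselves lives in mm/memory.c, whose hunk did not render below. As a sketch of the pattern just described, assuming the 2.6.14-era helpers rather than quoting the patch verbatim: allocate without the lock, then take page_table_lock only long enough to detect a racing allocator.)

        int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        {
                pmd_t *new = pmd_alloc_one(mm, address);        /* no lock held */
                if (!new)
                        return -ENOMEM;

                spin_lock(&mm->page_table_lock);
                if (pud_present(*pud))          /* a racing task already allocated */
                        pmd_free(new);          /* ours is redundant; free it */
                else
                        pud_populate(mm, pud, new);
                spin_unlock(&mm->page_table_lock);
                return 0;
        }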

Convert their callers from common code.  But avoid coming back to change them
again later: instead of moving the spin_lock(&mm->page_table_lock) down,
switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which
encapsulate the mapping+locking and unlocking+unmapping together, and in the
end may use alternatives to the mm page_table_lock itself.
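
(A sketch of what a converted call site looks like, following the fs/exec.c hunk below; set_one_pte is a hypothetical helper, but the two macros are exactly the ones added to include/linux/mm.h in this patch.)

        static int set_one_pte(struct mm_struct *mm, pmd_t *pmd,
                               unsigned long addr, pte_t pteval)
        {
                spinlock_t *ptl;
                pte_t *pte;

                /* Allocate the page table if needed, then map it and take ptl. */
                pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
                if (pte_none(*pte))
                        set_pte_at(mm, addr, pte, pteval);
                /* Drop the lock and unmap the page table in one paired step. */
                pte_unmap_unlock(pte, ptl);
                return 0;
        }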

These callers all hold mmap_sem (some exclusively, some not), so at no level
can a page table be whipped away from beneath them; and pte_alloc uses the
"atomic" pmd_present to test whether it needs to allocate.  It appears that on
all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Hugh Dickins authored and Linus Torvalds committed Oct 30, 2005
1 parent 1bb3630 commit c74df32
Showing 7 changed files with 90 additions and 135 deletions.
fs/exec.c: 14 changes (5 additions, 9 deletions)
@@ -309,40 +309,36 @@ void install_arg_page(struct vm_area_struct *vma,
 	pud_t * pud;
 	pmd_t * pmd;
 	pte_t * pte;
+	spinlock_t *ptl;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto out_sig;
+		goto out;
 
 	flush_dcache_page(page);
 	pgd = pgd_offset(mm, address);
-
-	spin_lock(&mm->page_table_lock);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
 		goto out;
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		goto out;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
 	if (!pte)
 		goto out;
 	if (!pte_none(*pte)) {
-		pte_unmap(pte);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 	inc_mm_counter(mm, anon_rss);
 	lru_cache_add_active(page);
 	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
 					page, vma->vm_page_prot))));
 	page_add_anon_rmap(page, vma, address);
-	pte_unmap(pte);
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
 
 	/* no need for flush_tlb */
 	return;
 out:
-	spin_unlock(&mm->page_table_lock);
-out_sig:
 	__free_page(page);
 	force_sig(SIGKILL, current);
 }
include/linux/mm.h: 18 changes (18 additions, 0 deletions)
@@ -779,10 +779,28 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
+#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
+({							\
+	spinlock_t *__ptl = &(mm)->page_table_lock;	\
+	pte_t *__pte = pte_offset_map(pmd, address);	\
+	*(ptlp) = __ptl;				\
+	spin_lock(__ptl);				\
+	__pte;						\
+})
+
+#define pte_unmap_unlock(pte, ptl)	do {		\
+	spin_unlock(ptl);				\
+	pte_unmap(pte);					\
+} while (0)
+
 #define pte_alloc_map(mm, pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
 		NULL: pte_offset_map(pmd, address))
 
+#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
 #define pte_alloc_kernel(pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
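
(For the non-allocating variant, a sketch of a lookup under the returned lock; read_one_pte is hypothetical and assumes the caller already knows the pmd is present. Handing the lock back through ptlp, instead of having callers name mm->page_table_lock directly, is what lets the "alternatives to the mm page_table_lock" mentioned in the commit message be substituted later without touching call sites.)

        static pte_t read_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
        {
                spinlock_t *ptl;
                pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
                pte_t entry = *ptep;    /* read the entry under ptl */

                pte_unmap_unlock(ptep, ptl);
                return entry;
        }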
kernel/fork.c: 2 changes (0 additions, 2 deletions)
@@ -255,7 +255,6 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		/*
 		 * Link in the new vma and copy the page table entries.
 		 */
-		spin_lock(&mm->page_table_lock);
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
 
@@ -265,7 +264,6 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
 		mm->map_count++;
 		retval = copy_page_range(mm, oldmm, tmp);
-		spin_unlock(&mm->page_table_lock);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
mm/fremap.c: 48 changes (18 additions, 30 deletions)
@@ -63,23 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	/*
 	 * This page may have been truncated. Tell the
@@ -89,10 +86,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	inode = vma->vm_file->f_mapping->host;
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
-		goto err_unlock;
+		goto unlock;
 	err = -ENOMEM;
 	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
+		goto unlock;
 
 	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
 		inc_mm_counter(mm, file_rss);
@@ -101,17 +98,15 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
 
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
 	return err;
 }
 EXPORT_SYMBOL(install_page);
-
-
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
@@ -125,23 +120,20 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
 		update_hiwater_rss(mm);
@@ -150,17 +142,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
-
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
+	err = 0;
+out:
 	return err;
 }
 
-
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  * file within an existing vma.
mm/hugetlb.c: 12 changes (8 additions, 4 deletions)
@@ -277,19 +277,23 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	unsigned long addr;
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		src_pte = huge_pte_offset(src, addr);
+		if (!src_pte)
+			continue;
 		dst_pte = huge_pte_alloc(dst, addr);
 		if (!dst_pte)
 			goto nomem;
+		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
-		src_pte = huge_pte_offset(src, addr);
-		if (src_pte && !pte_none(*src_pte)) {
+		if (!pte_none(*src_pte)) {
 			entry = *src_pte;
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
 		spin_unlock(&src->page_table_lock);
+		spin_unlock(&dst->page_table_lock);
 	}
 	return 0;
 
@@ -354,7 +358,6 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 
 	hugetlb_prefault_arch_hook(mm);
 
-	spin_lock(&mm->page_table_lock);
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
 		unsigned long idx;
 		pte_t *pte = huge_pte_alloc(mm, addr);
@@ -389,11 +392,12 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 				goto out;
 			}
 		}
+		spin_lock(&mm->page_table_lock);
 		add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 		set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
+		spin_unlock(&mm->page_table_lock);
 	}
 out:
-	spin_unlock(&mm->page_table_lock);
 	return ret;
 }
 
(Diffs for the remaining two changed files did not load.)
