
Commit 0e71954

---
r: 96536
b: refs/heads/master
c: 362a61a
h: refs/heads/master
v: v3
Nick Piggin authored and Linus Torvalds committed May 14, 2008
1 parent 8c9cce0 commit 0e71954
Showing 3 changed files with 41 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 73f10281ea96d7e8b4fc1c5d755a7c8eb484155b
+refs/heads/master: 362a61ad61199e19a61b8e432015e2586b288f5b
21 changes: 19 additions & 2 deletions trunk/include/asm-alpha/pgtable.h
@@ -287,17 +287,34 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
* The smp_read_barrier_depends() in the following functions are required to
* order the load of *dir (the pointer in the top level page table) with any
* subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
*
* If this ordering is not enforced, the CPU might load an older value of
* *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
* more details.
*
* Note that we never change the mm->pgd pointer after the task is running, so
* pgd_offset does not require such a barrier.
*/

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
-	return (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
-	return (pte_t *) pmd_page_vaddr(*dir)
+	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
 		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
}

#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
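
The comment added to asm-alpha/pgtable.h above is the read side of the story: on Alpha, even a load whose address is computed from a just-loaded pointer may return stale data unless smp_read_barrier_depends() sits between the two loads. As a rough userspace analogue (not part of this commit; the type and variable names below are made up, and C11's memory_order_consume stands in for smp_read_barrier_depends()), the lockless walk looks roughly like this:

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical stand-in for a page-table page published by the allocator. */
struct level_two { long entry[512]; };

/* Stand-in for the top-level slot (*dir in pmd_offset() above). */
static _Atomic(struct level_two *) top_slot;

/*
 * Read side: load the pointer, then perform a load that depends on it.
 * memory_order_consume plays the role of smp_read_barrier_depends();
 * with a plain or relaxed load, Alpha could satisfy the dependent load
 * from a stale cache line and return uninitialised contents.
 */
long walk(size_t index)
{
	struct level_two *dir =
		atomic_load_explicit(&top_slot, memory_order_consume);

	if (dir == NULL)
		return -1;			/* not populated yet */
	return dir->entry[index % 512];		/* data-dependent load */
}

The matching write side is the smp_wmb() added to __pte_alloc() and the other allocation paths in mm/memory.c below.
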
21 changes: 21 additions & 0 deletions trunk/mm/memory.c
@@ -311,6 +311,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
if (!new)
return -ENOMEM;

/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_read_barrier_depends() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

spin_lock(&mm->page_table_lock);
if (!pmd_present(*pmd)) { /* Has another populated it ? */
mm->nr_ptes++;
@@ -329,6 +344,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
if (!new)
return -ENOMEM;

smp_wmb(); /* See comment in __pte_alloc */

spin_lock(&init_mm.page_table_lock);
if (!pmd_present(*pmd)) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
@@ -2619,6 +2636,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
if (!new)
return -ENOMEM;

smp_wmb(); /* See comment in __pte_alloc */

spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
pud_free(mm, new);
@@ -2640,6 +2659,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
if (!new)
return -ENOMEM;

smp_wmb(); /* See comment in __pte_alloc */

spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) /* Another has populated it */
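
The comment added to __pte_alloc() above describes the write side: everything done while setting up the new pte page must become visible before the page-table entry that makes it reachable, because most walkers traverse the tables without taking locks. A self-contained userspace sketch of the pairing (hypothetical names; pthreads and C11 atomics stand in for the kernel primitives, with memory_order_release playing the role of the smp_wmb() before pmd_populate() and memory_order_consume the role of smp_read_barrier_depends()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int initialised; };

/* Stand-in for the pmd entry that publishes the new pte page. */
static _Atomic(struct table *) slot;

/* Allocation path: set the page up, then publish it (cf. __pte_alloc()). */
static void *allocator(void *unused)
{
	struct table *new = calloc(1, sizeof(*new));

	if (new == NULL)
		return NULL;
	new->initialised = 1;	/* "pte page lock and page clearing" */
	/*
	 * Release ordering ~ smp_wmb() before pmd_populate(): the
	 * initialisation above may not be reordered past this store.
	 * Demoting this to memory_order_relaxed models the bug the
	 * commit fixes.
	 */
	atomic_store_explicit(&slot, new, memory_order_release);
	return NULL;
}

/* Lockless walker: spin until the entry appears, then dereference it. */
static void *walker(void *unused)
{
	struct table *t;

	do {
		t = atomic_load_explicit(&slot, memory_order_consume);
	} while (t == NULL);	/* consume ~ smp_read_barrier_depends() */

	if (!t->initialised)
		fprintf(stderr, "walker saw an uninitialised table\n");
	return NULL;
}

int main(void)
{
	pthread_t a, w;

	pthread_create(&w, NULL, walker, NULL);
	pthread_create(&a, NULL, allocator, NULL);
	pthread_join(a, NULL);
	pthread_join(w, NULL);
	return 0;
}

On a strongly ordered machine such as x86 the relaxed variant will not visibly fail; the write-side barrier matters on most weakly ordered SMP systems, while the read-side barrier is only needed on Alpha, as the comments in the diff note.
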
