Chen, Kenneth W authored and Linus Torvalds committed Dec 7, 2006
1 parent 08ff75a commit a92ddf3
Showing 9 changed files with 145 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e1dbeda60a7ea9e82a908d93c07308d104d50d79
+refs/heads/master: 39dde65c9940c97fcd178a3d2b1c57ed8b7b68aa
112 changes: 111 additions & 1 deletion trunk/arch/i386/mm/hugetlbpage.c
@@ -17,6 +17,113 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+				struct vm_area_struct *vma,
+				unsigned long addr, pgoff_t idx)
+{
+	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+				svma->vm_start;
+	unsigned long sbase = saddr & PUD_MASK;
+	unsigned long s_end = sbase + PUD_SIZE;
+
+	/*
+	 * match the virtual addresses, permission and the alignment of the
+	 * page table page.
+	 */
+	if (pmd_index(addr) != pmd_index(saddr) ||
+	    vma->vm_flags != svma->vm_flags ||
+	    sbase < svma->vm_start || svma->vm_end < s_end)
+		return 0;
+
+	return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long base = addr & PUD_MASK;
+	unsigned long end = base + PUD_SIZE;
+
+	/*
+	 * check on proper vm_flags and page table alignment
+	 */
+	if (vma->vm_flags & VM_MAYSHARE &&
+	    vma->vm_start <= base && end <= vma->vm_end)
+		return 1;
+	return 0;
+}
+
+/*
+ * search for a shareable pmd page for hugetlb.
+ */
+static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+			vma->vm_pgoff;
+	struct prio_tree_iter iter;
+	struct vm_area_struct *svma;
+	unsigned long saddr;
+	pte_t *spte = NULL;
+
+	if (!vma_shareable(vma, addr))
+		return;
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+		if (svma == vma)
+			continue;
+
+		saddr = page_table_shareable(svma, vma, addr, idx);
+		if (saddr) {
+			spte = huge_pte_offset(svma->vm_mm, saddr);
+			if (spte) {
+				get_page(virt_to_page(spte));
+				break;
+			}
+		}
+	}
+
+	if (!spte)
+		goto out;
+
+	spin_lock(&mm->page_table_lock);
+	if (pud_none(*pud))
+		pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
+	else
+		put_page(virt_to_page(spte));
+	spin_unlock(&mm->page_table_lock);
+out:
+	spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
+ * indicated by page_count > 1, unmap is achieved by clearing pud and
+ * decrementing the ref count. If count == 1, the pte page is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ *	    0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	pgd_t *pgd = pgd_offset(mm, *addr);
+	pud_t *pud = pud_offset(pgd, *addr);
+
+	BUG_ON(page_count(virt_to_page(ptep)) == 0);
+	if (page_count(virt_to_page(ptep)) == 1)
+		return 0;
+
+	pud_clear(pud);
+	put_page(virt_to_page(ptep));
+	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	return 1;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -25,8 +132,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
+	if (pud) {
+		if (pud_none(*pud))
+			huge_pmd_share(mm, addr, pud);
 		pte = (pte_t *) pmd_alloc(mm, pud, addr);
+	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
 	return pte;
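The sharing test above is pure address arithmetic, so it can be exercised outside the kernel. Below is a small user-space model of vma_shareable() and page_table_shareable() — not part of the commit — assuming i386 PAE geometry (4 KB base pages, 2 MB huge pages, one pmd page mapping 1 GB); struct vma and the flag value 1 (standing in for VM_MAYSHARE) are illustrative stand-ins, not kernel definitions.

#include <stdio.h>

#define PAGE_SHIFT      12                      /* 4 KB base pages */
#define PMD_SHIFT       21                      /* 2 MB huge pages */
#define PTRS_PER_PMD    512
#define PUD_SIZE        (1UL << 30)             /* region mapped by one pmd page */
#define PUD_MASK        (~(PUD_SIZE - 1))

struct vma {                            /* stand-in for vm_area_struct */
        unsigned long vm_start, vm_end; /* [vm_start, vm_end) */
        unsigned long vm_pgoff;         /* file offset in base pages */
        unsigned long vm_flags;         /* 1 models VM_MAYSHARE */
};

static unsigned long pmd_index(unsigned long addr)
{
        return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/* mirrors vma_shareable(): shared mapping covering the whole 1 GB region */
static int vma_shareable(struct vma *vma, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;

        return vma->vm_flags && vma->vm_start <= base &&
               base + PUD_SIZE <= vma->vm_end;
}

/* mirrors page_table_shareable(): same slot, same flags, full coverage */
static unsigned long page_table_shareable(struct vma *svma, struct vma *vma,
                                          unsigned long addr, unsigned long idx)
{
        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
                                svma->vm_start;
        unsigned long sbase = saddr & PUD_MASK;

        if (pmd_index(addr) != pmd_index(saddr) ||
            vma->vm_flags != svma->vm_flags ||
            sbase < svma->vm_start || svma->vm_end < sbase + PUD_SIZE)
                return 0;
        return saddr;
}

int main(void)
{
        /* two tasks map the same file at different 1 GB-aligned addresses */
        struct vma a = { 0x40000000UL, 0x80000000UL, 0, 1 };
        struct vma b = { 0x80000000UL, 0xc0000000UL, 0, 1 };
        unsigned long addr = a.vm_start + (5UL << PMD_SHIFT);
        unsigned long idx = ((addr - a.vm_start) >> PAGE_SHIFT) + a.vm_pgoff;

        printf("shareable: %d, partner address: %#lx\n",
               vma_shareable(&a, addr),
               page_table_shareable(&b, &a, addr, idx)); /* 1, 0x80a00000 */
        return 0;
}

Both mappings cover a full 1 GB region of the file at compatible offsets and flags, so the second mm can point its pud at the first mm's pmd page instead of allocating its own.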
5 changes: 5 additions & 0 deletions trunk/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
5 changes: 5 additions & 0 deletions trunk/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepte_offset(hpdp, addr);
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
 	pte_t *hugepte = hugepd_page(*hpdp);
5 changes: 5 additions & 0 deletions trunk/arch/sh/mm/hugetlbpage.c
@@ -63,6 +63,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
5 changes: 5 additions & 0 deletions trunk/arch/sh64/mm/hugetlbpage.c
@@ -53,6 +53,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
5 changes: 5 additions & 0 deletions trunk/arch/sparc64/mm/hugetlbpage.c
@@ -235,6 +235,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
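Every architecture that does not implement sharing stubs huge_pmd_unshare() out with return 0, so the generic callers in mm/hugetlb.c always fall through to the ordinary per-pte unmap path. On i386 the decision rides on the pte page's refcount: the page starts at count 1 when allocated, huge_pmd_share() takes an extra reference per additional mm, and huge_pmd_unshare() only reports success while other sharers remain. A minimal model of that protocol — an ordinary counter stands in for the struct page refcount, and all locking is omitted:

#include <assert.h>
#include <stdio.h>

static int pmd_page_count = 1;  /* refcount of the hugetlb pte page */

/* models the get_page() in huge_pmd_share(): another mm attaches */
static void share(void)
{
        pmd_page_count++;
}

/* models huge_pmd_unshare(): 1 = dropped a shared reference,
 * 0 = sole user, so the caller must unmap the ptes itself */
static int unshare(void)
{
        assert(pmd_page_count > 0);     /* BUG_ON(page_count == 0) */
        if (pmd_page_count == 1)
                return 0;
        pmd_page_count--;               /* pud_clear() + put_page() */
        return 1;
}

int main(void)
{
        share();                        /* a second mm shares the page */
        printf("%d\n", unshare());      /* 1: detached, page lives on */
        printf("%d\n", unshare());      /* 0: last user keeps the page */
        return 0;
}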
1 change: 1 addition & 0 deletions trunk/include/linux/hugetlb.h
@@ -35,6 +35,7 @@ extern int sysctl_hugetlb_shm_group;
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
7 changes: 7 additions & 0 deletions trunk/mm/hugetlb.c
@@ -386,6 +386,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (!ptep)
 			continue;
 
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
+
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
 		if (pte_none(pte))
 			continue;
@@ -658,11 +661,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
+	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
 		if (!pte_none(*ptep)) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -671,6 +677,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
 	flush_tlb_range(vma, start, end);
 }
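Both call sites step address forward by HPAGE_SIZE per iteration, so on success huge_pmd_unshare() rewinds address to the last huge page of the 1 GB region the shared pmd page mapped, and the loop's own increment then lands on the next region; hugetlb_change_protection() also takes i_mmap_lock ahead of page_table_lock, the same order huge_pmd_share() uses. A quick stand-alone check of the rewind arithmetic — not part of the commit, assuming i386 PAE values (2 MB huge pages, 512 entries per pte page):

#include <stdio.h>

#define HPAGE_SIZE      (1UL << 21)     /* 2 MB huge pages */
#define PTRS_PER_PTE    512
/* round x up to a power-of-two multiple of a, like the kernel's ALIGN() */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* the unmap loop stood here when the shared pmd page was dropped */
        unsigned long address = 0x80a00000UL;

        /* the rewind in huge_pmd_unshare(): last huge page of the region */
        address = ALIGN_UP(address, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
        printf("rewound to  %#lx\n", address);  /* 0xbfe00000 */

        /* the caller's own increment then skips to the next 1 GB region */
        address += HPAGE_SIZE;
        printf("resumes at  %#lx\n", address);  /* 0xc0000000 */
        return 0;
}

One put_page() thus detaches an entire 1 GB worth of huge-page mappings, which is why the callers can simply continue instead of clearing each pte.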
