x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3

Impact: New, currently unused interface.

Add a generic interface to follow a pfn in a pfnmap vma range. This is used by
one of the subsequent x86 PAT related patches to keep track of memory types
for vma regions across vma copy and free. A usage sketch follows the diff
below.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
venkatesh.pallipadi@intel.com authored and H. Peter Anvin committed Dec 18, 2008
1 parent 3c8bb73 commit e121e41
Showing 2 changed files with 46 additions and 0 deletions.
3 changes: 3 additions & 0 deletions include/linux/mm.h
@@ -1223,6 +1223,9 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */

int follow_pfnmap_pte(struct vm_area_struct *vma,
                        unsigned long address, pte_t *ret_ptep);

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
43 changes: 43 additions & 0 deletions mm/memory.c
@@ -1111,6 +1111,49 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        return page;
}

int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
                        pte_t *ret_ptep)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        if (!is_pfn_mapping(vma))
                goto err;

        page = NULL;
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                goto err;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                goto err;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto err;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

        pte = *ptep;
        if (!pte_present(pte))
                goto err_unlock;

        *ret_ptep = pte;
        pte_unmap_unlock(ptep, ptl);
        return 0;

err_unlock:
        pte_unmap_unlock(ptep, ptl);
err:
        return -EINVAL;
}

/* Can we do the FOLL_ANON optimization? */
static inline int use_zero_page(struct vm_area_struct *vma)
{
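For context, here is a minimal sketch of how a caller might use the new
interface. It assumes a kernel context; track_vma_pfn() and the PAT
bookkeeping it stands in for are hypothetical and not part of this commit.

/*
 * Hypothetical caller: look up the pte backing an address in a
 * VM_PFNMAP vma, e.g. to record the memory type of the underlying
 * pfn when the vma is copied or freed.
 */
static int track_vma_pfn(struct vm_area_struct *vma, unsigned long addr)
{
        pte_t pte;
        unsigned long pfn;

        /* Fails with -EINVAL if vma is not a pfnmap or no pte is present. */
        if (follow_pfnmap_pte(vma, addr, &pte))
                return -EINVAL;

        pfn = pte_pfn(pte);     /* page frame number from the returned pte */
        /* ... record the memory type for this pfn (PAT bookkeeping) ... */
        return 0;
}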
