Commit
---
yaml
---
r: 14828
b: refs/heads/master
c: a145dd4
h: refs/heads/master
v: v3
Linus Torvalds committed Nov 30, 2005
1 parent b164e6c commit 7006dff
Showing 3 changed files with 36 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f8e988436b9d83b20bc5cc378f1a8034816ae6a6
+refs/heads/master: a145dd411eb28c83ee4bb68b66f62c326c0f764e
1 change: 1 addition & 0 deletions trunk/include/linux/mm.h
@@ -956,6 +956,7 @@ struct page *vmalloc_to_page(void *addr);
 unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                 unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
                 unsigned int foll_flags);
36 changes: 34 additions & 2 deletions trunk/mm/memory.c
@@ -1172,7 +1172,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
         spinlock_t *ptl;
 
         retval = -EINVAL;
-        if (PageAnon(page) || !PageReserved(page))
+        if (PageAnon(page))
                 goto out;
         retval = -ENOMEM;
         flush_dcache_page(page);
@@ -1196,6 +1196,35 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
         return retval;
 }
 
+/*
+ * This allows drivers to insert individual pages they've allocated
+ * into a user vma.
+ *
+ * The page has to be a nice clean _individual_ kernel allocation.
+ * If you allocate a compound page, you need to have marked it as
+ * such (__GFP_COMP), or manually just split the page up yourself
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
+ *
+ * NOTE! Traditionally this was done with "remap_pfn_range()" which
+ * took an arbitrary page protection parameter. This doesn't allow
+ * that. Your vma protection will have to be set up correctly, which
+ * means that if you want a shared writable mapping, you'd better
+ * ask for a shared writable mapping!
+ *
+ * The page does not need to be reserved.
+ */
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+        if (addr < vma->vm_start || addr >= vma->vm_end)
+                return -EFAULT;
+        if (!page_count(page))
+                return -EINVAL;
+        return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(vm_insert_page);
+
 /*
  * Somebody does a pfn remapping that doesn't actually work as a vma.
  *
@@ -1225,8 +1254,11 @@ static int incomplete_pfn_remap(struct vm_area_struct *vma,
         if (!pfn_valid(pfn))
                 return -EINVAL;
 
-        retval = 0;
         page = pfn_to_page(pfn);
+        if (!PageReserved(page))
+                return -EINVAL;
+
+        retval = 0;
         while (start < end) {
                 retval = insert_page(vma->vm_mm, start, page, prot);
                 if (retval < 0)
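For context, here is a minimal usage sketch that is not part of this commit: a hypothetical character driver's mmap handler mapping a small buffer of individually allocated kernel pages into the caller's vma with the new vm_insert_page() helper. The names mydev_mmap, mydev_pages and MYDEV_NPAGES are illustrative assumptions, not kernel APIs.

/*
 * Hypothetical usage sketch (not part of this commit): map a small
 * driver-owned buffer of individually allocated pages into user space.
 */
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define MYDEV_NPAGES 4

/* Assumed to be filled elsewhere with alloc_page(GFP_KERNEL), i.e. the
 * "nice clean individual kernel allocation" the comment above asks for. */
static struct page *mydev_pages[MYDEV_NPAGES];

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long addr = vma->vm_start;
        int i, err;

        if (vma->vm_end - vma->vm_start != MYDEV_NPAGES * PAGE_SIZE)
                return -EINVAL;

        /*
         * Unlike remap_pfn_range(), vm_insert_page() takes no pgprot
         * argument: pages are mapped with vma->vm_page_prot, so the
         * protection comes from the caller's mmap() flags.
         */
        for (i = 0; i < MYDEV_NPAGES; i++) {
                err = vm_insert_page(vma, addr, mydev_pages[i]);
                if (err)
                        return err;
                addr += PAGE_SIZE;
        }
        return 0;
}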
