Commit
---
yaml
---
r: 105290
b: refs/heads/master
c: c37f9fb
h: refs/heads/master
v: v3
Andy Whitcroft authored and Linus Torvalds committed Jul 24, 2008
1 parent 8cd7218 commit 660b3ef
Showing 2 changed files with 54 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9682290484370ce68ba23cd2ec2838e301934199
+refs/heads/master: c37f9fb11c976ffc08200d631dada6dcbfd07ea4
58 changes: 53 additions & 5 deletions trunk/mm/hugetlb.c
@@ -247,6 +247,9 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 /* Decrement the reserved pages in the hugepage pool by one */
 static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
 {
+	if (vma->vm_flags & VM_NORESERVE)
+		return;
+
 	if (vma->vm_flags & VM_SHARED) {
 		/* Shared mappings always use reserves */
 		resv_huge_pages--;
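
The VM_NORESERVE test added above is the hook that lets MAP_NORESERVE huge page mappings opt out of the reserve accounting; before this change the flag was effectively ignored for hugetlb. A minimal userspace sketch of creating such a mapping, assuming a hugetlbfs mount at /mnt/huge and a 2 MB huge page size (the path, file name, and size are illustrative assumptions, not part of this commit):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical hugetlbfs file; the mount point is an assumption. */
		int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			perror("open");
			return EXIT_FAILURE;
		}

		/* MAP_NORESERVE asks the kernel to skip the up-front huge page
		 * reservation that the VM_NORESERVE checks in this commit honour. */
		void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_NORESERVE, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return EXIT_FAILURE;
		}

		munmap(p, 2UL << 20);
		close(fd);
		return EXIT_SUCCESS;
	}
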
@@ -720,25 +723,65 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }

+/*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation.  Where it does not, we will need to logically increase
+ * reservation and actually increase quota before an allocation can occur.
+ * Where any new reservation would be required the reservation change is
+ * prepared, but not committed.  Once the page has been quota'd, allocated,
+ * and instantiated, the change should be committed via vma_commit_reservation.
+ * No action is required on failure.
+ */
+static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct inode *inode = mapping->host;
+
+	if (vma->vm_flags & VM_SHARED) {
+		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		return region_chg(&inode->i_mapping->private_list,
+							idx, idx + 1);
+
+	} else {
+		if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+			return 1;
+	}
+
+	return 0;
+}
+static void vma_commit_reservation(struct vm_area_struct *vma,
+							unsigned long addr)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct inode *inode = mapping->host;
+
+	if (vma->vm_flags & VM_SHARED) {
+		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		region_add(&inode->i_mapping->private_list, idx, idx + 1);
+	}
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
 	struct page *page;
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned int chg = 0;
+	unsigned int chg;
 
 	/*
 	 * Processes that did not create the mapping will have no reserves and
 	 * will not have accounted against quota. Check that the quota can be
 	 * made before satisfying the allocation.
+	 * MAP_NORESERVE mappings may also need pages and quota allocated
+	 * if no reserve mapping overlaps.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
-			!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		chg = 1;
+	chg = vma_needs_reservation(vma, addr);
+	if (chg < 0)
+		return ERR_PTR(chg);
+	if (chg)
 		if (hugetlb_get_quota(inode->i_mapping, chg))
 			return ERR_PTR(-ENOSPC);
-	}
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
@@ -755,6 +798,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_refcounted(page);
 	set_page_private(page, (unsigned long) mapping);
 
+	vma_commit_reservation(vma, addr);
+
 	return page;
 }

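The new helpers split reservation into a prepare phase (vma_needs_reservation) and a commit phase (vma_commit_reservation): the change is prepared before quota and pages are allocated, and committed only once the page is safely instantiated, so the failure paths have nothing to unwind. One caveat in the diff as committed: chg is declared unsigned int, so the chg < 0 test cannot fire even though vma_needs_reservation() can propagate a negative errno from region_chg(); a signed type is needed for that error path to be reachable. A self-contained sketch of the prepare/commit idea using toy stand-ins (every name below is illustrative, not the kernel API), with a signed chg so the error check works:

	#include <stdio.h>

	/* Toy state: a quota of two pages and a count of committed
	 * reservations.  Purely illustrative. */
	static long quota_free = 2;
	static long committed;

	/* Phase 1: report how many new pages would need reserving (always
	 * one here), or a negative error.  Nothing is recorded yet. */
	static long reservation_needed(void)
	{
		return 1;
	}

	/* Charge the quota for chg pages; fail with no side effects when
	 * the quota is exhausted, mirroring hugetlb_get_quota()'s role. */
	static int quota_get(long chg)
	{
		if (quota_free < chg)
			return -1;
		quota_free -= chg;
		return 0;
	}

	/* Phase 2: record the reservation only after the page is safely
	 * instantiated, so an allocation failure needs no rollback. */
	static void reservation_commit(void)
	{
		committed++;
	}

	static int fake_alloc_page(void)
	{
		long chg = reservation_needed();	/* signed, unlike the diff */

		if (chg < 0)
			return -1;			/* nothing to undo */
		if (chg && quota_get(chg))
			return -1;			/* quota exhausted */

		/* ... page allocation and instantiation happen here ... */

		reservation_commit();
		return 0;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("alloc %d: %s\n", i,
			       fake_alloc_page() ? "failed" : "ok");
		printf("committed=%ld quota_free=%ld\n", committed, quota_free);
		return 0;
	}

With the toy quota of two pages, the first two allocations succeed and commit, and the third fails at the quota step with nothing to roll back.
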
@@ -1560,6 +1605,9 @@ int hugetlb_reserve_pages(struct inode *inode,
 {
 	long ret, chg;
 
+	if (vma && vma->vm_flags & VM_NORESERVE)
+		return 0;
+
 	/*
 	 * Shared mappings base their reservation on the number of pages that
 	 * are already allocated on behalf of the file. Private mappings need
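
With this early return, a VM_NORESERVE mapping skips reservation entirely at mmap() time, so the cost of overcommit is paid at fault time instead: if the huge page pool is exhausted when the process first touches a page, the fault cannot be satisfied and the process receives SIGBUS. A hedged sketch of what a caller observes (the hugetlbfs path, the 2 MB page size, and a page count chosen to exceed the configured pool are all assumptions):

	#include <fcntl.h>
	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE	(2UL << 20)	/* assumed huge page size */
	#define NPAGES	64		/* assumed to exceed the pool */

	static sigjmp_buf env;

	static void on_sigbus(int sig)
	{
		(void)sig;
		siglongjmp(env, 1);
	}

	int main(void)
	{
		/* Hypothetical hugetlbfs file, as in the earlier sketch. */
		int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
		volatile size_t i;
		char *p;

		if (fd < 0)
			return 1;

		/* With MAP_NORESERVE this mmap() succeeds even when NPAGES
		 * huge pages cannot be reserved, because
		 * hugetlb_reserve_pages() now returns 0 immediately for
		 * VM_NORESERVE mappings. */
		p = mmap(NULL, NPAGES * HPAGE, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_NORESERVE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		signal(SIGBUS, on_sigbus);
		for (i = 0; i < NPAGES; i++) {
			if (sigsetjmp(env, 1)) {
				printf("SIGBUS at page %zu: pool exhausted\n", i);
				break;
			}
			p[i * HPAGE] = 1;	/* first touch allocates a page */
		}
		return 0;
	}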
