From 499fe090d42ff6099ab00bb5086ec3f6fc38f8bc Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Wed, 22 Mar 2006 00:08:53 -0800
Subject: [PATCH]

--- yaml ---
r: 22487
b: refs/heads/master
c: 3935baa9bcda3ccaee4f7849f5157d316e34412e
h: refs/heads/master
i:
  22485: ff70801fee1bc05b2beb981a36e4ce819e89659c
  22483: 51e1d24f6f93aace84999ac05a015f1f5bd70446
  22479: 52afa3e65ed5b47e116dd844c189aa106b3f65a3
v: v3
---
 [refs]             |  2 +-
 trunk/mm/hugetlb.c | 25 ++++++++++++++++++-------
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/[refs] b/[refs]
index 5dd26778ee5f..009e67087425 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 79ac6ba40eb8d70f0d204e98ae9b63280ad1018c
+refs/heads/master: 3935baa9bcda3ccaee4f7849f5157d316e34412e
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index 41b1038f76da..d5987a87bbe5 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/cpuset.h>
+#include <linux/mutex.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -26,6 +27,10 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+/*
+ * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ */
+static DEFINE_SPINLOCK(hugetlb_lock);
 
 static void clear_huge_page(struct page *page, unsigned long addr)
 {
@@ -50,11 +55,6 @@ static void copy_huge_page(struct page *dst, struct page *src,
 	}
 }
 
-/*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
- */
-static DEFINE_SPINLOCK(hugetlb_lock);
-
 static void enqueue_huge_page(struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -508,14 +508,24 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t entry;
 	int ret;
+	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 
 	ptep = huge_pte_alloc(mm, address);
 	if (!ptep)
 		return VM_FAULT_OOM;
 
+	/*
+	 * Serialize hugepage allocation and instantiation, so that we don't
+	 * get spurious allocation failures if two CPUs race to instantiate
+	 * the same page in the page cache.
+	 */
+	mutex_lock(&hugetlb_instantiation_mutex);
 	entry = *ptep;
-	if (pte_none(entry))
-		return hugetlb_no_page(mm, vma, address, ptep, write_access);
+	if (pte_none(entry)) {
+		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+		mutex_unlock(&hugetlb_instantiation_mutex);
+		return ret;
+	}
 
 	ret = VM_FAULT_MINOR;
 
@@ -525,6 +535,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (write_access && !pte_write(entry))
 		ret = hugetlb_cow(mm, vma, address, ptep, entry);
 	spin_unlock(&mm->page_table_lock);
+	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
 }
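
The hugetlb_fault() hunks above take a single static mutex around the "pte is empty, so instantiate the page" step, so that two CPUs faulting on the same huge page cannot both call hugetlb_no_page() and race for the same scarce huge page. The following is a minimal user-space sketch of that lock-then-recheck pattern; it is not part of the patch, and the names slot, expensive_alloc and instantiation_mutex are invented for illustration.

/*
 * Illustrative sketch only -- not kernel code. It mirrors the structure of
 * hugetlb_fault() after this patch: take one mutex around the
 * "is the entry missing? then instantiate it" step so two racing threads
 * cannot both attempt the expensive allocation, with the loser failing
 * spuriously even though the resource it wants already exists.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *slot;	/* plays the role of the huge-page pte */
static pthread_mutex_t instantiation_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *expensive_alloc(void)
{
	/* Stand-in for dequeueing one of a small pool of huge pages. */
	return malloc(4096);
}

static void *fault(void *arg)
{
	(void)arg;

	/*
	 * Serialize instantiation: only one thread performs the allocation;
	 * the other sees the filled slot on its re-check and backs off
	 * instead of failing because the free "huge page" is already taken.
	 */
	pthread_mutex_lock(&instantiation_mutex);
	if (slot == NULL)
		slot = expensive_alloc();
	pthread_mutex_unlock(&instantiation_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault, NULL);
	pthread_create(&b, NULL, fault, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("slot instantiated once: %p\n", slot);
	return 0;
}

As in the patch, one coarse global mutex is used rather than finer-grained per-file or per-address locking; huge-page faults are comparatively rare, so the simplicity of a single lock costs little.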