HWPOISON, hugetlb: detect hwpoison in hugetlb code
This patch blocks access to a hwpoisoned hugepage and also blocks
unmapping for it.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Naoya Horiguchi authored and Andi Kleen committed Aug 11, 2010
1 parent 93f70f9 commit fd6a03e
Showing 1 changed file with 40 additions and 0 deletions.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -2149,6 +2151,19 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * A HWPoisoned hugepage was already unmapped and its refcount dropped.
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -2490,6 +2511,18 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_dup_rmap(page);
 	}
 
+	/*
+	 * Since the memory error handler replaces the pte with a hwpoison
+	 * swap entry at the time of error handling, a process which reserved
+	 * but never mapped the error hugepage has no hwpoison swap entry.
+	 * So we need to block accesses from such a process by checking
+	 * the PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
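[Editor's note] The pte replacement that the comment refers to is done by
the memory error handler through try_to_unmap(). A simplified sketch of
the relevant branch in try_to_unmap_one() (mm/rmap.c, with the rss
accounting elided), quoted for context rather than as part of this patch:

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		/*
		 * Drop the rss accounting for the page (elided), then
		 * replace the mapping with a hwpoison swap entry so
		 * later faults on this address see the poison marker.
		 */
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	}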
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
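[Editor's note] Taken together, these checks make any later access to a
poisoned hugepage return VM_FAULT_HWPOISON from the fault path, which the
architecture fault handler turns into a SIGBUS for the process. A
hypothetical userspace illustration (assumes root, CONFIG_MEMORY_FAILURE,
reserved hugepages via /proc/sys/vm/nr_hugepages, and the hugepage support
for MADV_HWPOISON added by this patch series):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t sz = 2UL << 20;		/* one 2MB hugepage */
		char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		p[0] = 1;			/* populate the hugepage */
		if (madvise(p, 4096, MADV_HWPOISON)) {
			perror("madvise");	/* poison injection, root only */
			return 1;
		}
		p[0] = 1;	/* access now blocked: kernel sends SIGBUS */
		return 0;
	}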
