From dd0bd6a52b361e4fda0764e9ca6c6ddc224838cc Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 17 Apr 2013 15:58:30 -0700
Subject: [PATCH]

--- yaml ---
r: 362595
b: refs/heads/master
c: 9cc3a5bd40067b9a0fbd49199d0780463fc2140f
h: refs/heads/master
i:
  362593: 451ee889d426c34a14e30274bc1e1af148190a57
  362591: 9272f78fc0c9a8e1c2f8713c816c017e4829db8e
v: v3
---
 [refs]             |  2 +-
 trunk/mm/hugetlb.c | 12 +++++++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index ef564fdf8b1a..ace15685bbd5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 23d9e482136e31c9d287633a6e473daa172767c4
+refs/heads/master: 9cc3a5bd40067b9a0fbd49199d0780463fc2140f
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index ca9a7c6d7e97..1a12f5b9a0ab 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			break;
 		}
 
-		if (absent ||
+		/*
+		 * We need to call hugetlb_fault for both hugepages under
+		 * migration (in which case hugetlb_fault waits for the
+		 * migration) and hwpoisoned hugepages (in which case we
+		 * need to prevent the caller from accessing them). To do
+		 * this, we use is_swap_pte here instead of
+		 * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned,
+		 * because it simply covers both cases and because we can't
+		 * follow correct pages directly from any kind of swap entries.
+		 */
+		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
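
Illustration (not part of the patch): the sketch below is a minimal, self-contained
user-space model of the decision the new check encodes. The names fake_pte_state,
fake_is_swap_pte() and needs_hugetlb_fault() are hypothetical stand-ins invented for
this example; the real is_swap_pte(), is_hugetlb_entry_migration() and
is_hugetlb_entry_hwpoisoned() operate on actual page-table entries inside the kernel,
and the FOLL_WRITE/pte_write half of the condition is omitted here.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for the state of a hugetlb PTE. In the kernel,
 * migration and hwpoison entries are both encoded as swap-type entries
 * in a non-present PTE, which is what lets a single is_swap_pte() check
 * cover both cases.
 */
enum fake_pte_state {
	FAKE_PTE_NONE,		/* no entry (absent) */
	FAKE_PTE_PRESENT,	/* normal mapped hugepage */
	FAKE_PTE_MIGRATION,	/* migration swap entry */
	FAKE_PTE_HWPOISON,	/* hwpoison swap entry */
};

/* Models is_swap_pte(): true for any swap-type (non-present) entry. */
static bool fake_is_swap_pte(enum fake_pte_state s)
{
	return s == FAKE_PTE_MIGRATION || s == FAKE_PTE_HWPOISON;
}

/*
 * Models the patched condition in follow_hugetlb_page(): absent PTEs and
 * swap-type entries (migration or hwpoison) are routed to hugetlb_fault()
 * instead of being followed as normal pages.
 */
static bool needs_hugetlb_fault(bool absent, enum fake_pte_state s)
{
	return absent || fake_is_swap_pte(s);
}

int main(void)
{
	printf("migration entry -> fault: %d\n",
	       needs_hugetlb_fault(false, FAKE_PTE_MIGRATION));	/* prints 1 */
	printf("hwpoison entry  -> fault: %d\n",
	       needs_hugetlb_fault(false, FAKE_PTE_HWPOISON));	/* prints 1 */
	printf("present page    -> fault: %d\n",
	       needs_hugetlb_fault(false, FAKE_PTE_PRESENT));	/* prints 0 */
	return 0;
}

As the patch comment explains, routing both cases through hugetlb_fault() is the point:
for a migration entry the fault path waits for the migration to finish, and for a
hwpoison entry it keeps the caller from accessing the poisoned hugepage, which is why
the check uses is_swap_pte() rather than the two more specific helpers.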