From 52062fbca756b5fb0d8ce453a38b311354287f9d Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Tue, 29 Nov 2005 14:07:55 -0800
Subject: [PATCH]

--- yaml ---
r: 14776
b: refs/heads/master
c: 5d2a2dbbc1025dbf7998b9289574d9592b8f21cc
h: refs/heads/master
v: v3
---
 [refs]            |  2 +-
 trunk/mm/memory.c | 11 +++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 5d7c7c870aad..095e83b1ffb5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c9cfcddfd65735437a4cb8563d6b66a6da8a5ed6
+refs/heads/master: 5d2a2dbbc1025dbf7998b9289574d9592b8f21cc
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index 74f95ae0510b..745b3482e6c2 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -1394,8 +1394,15 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 	 */
 	if (unlikely(!src)) {
 		void *kaddr = kmap_atomic(dst, KM_USER0);
-		unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE);
-		if (left)
+		void __user *uaddr = (void __user *)(va & PAGE_MASK);
+
+		/*
+		 * This really shouldn't fail, because the page is there
+		 * in the page tables. But it might just be unreadable,
+		 * in which case we just give up and fill the result with
+		 * zeroes.
+		 */
+		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
 			memset(kaddr, 0, PAGE_SIZE);
 		kunmap_atomic(kaddr, KM_USER0);
 		return;
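
Note (not part of the patch): the change above copies a full PAGE_SIZE starting from the page base, so the faulting address must be rounded down with PAGE_MASK first. The following is a minimal standalone userspace sketch of that alignment step only; the buffer setup is illustrative and memcpy() stands in for __copy_from_user_inatomic().

/*
 * Userspace illustration of the `va & PAGE_MASK` rounding used above.
 * All names and the buffer layout here are assumptions for the sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE ((uintptr_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* Two page-aligned "pages" of source data. */
	char *buf = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	char dst[PAGE_SIZE];

	if (!buf)
		return 1;
	memset(buf, 'A', PAGE_SIZE);
	memset(buf + PAGE_SIZE, 'B', PAGE_SIZE);

	/* A fault address somewhere inside the second page, not page-aligned. */
	uintptr_t va = (uintptr_t)buf + PAGE_SIZE + 123;

	/*
	 * Copying PAGE_SIZE bytes starting at va itself would run past the
	 * end of the second page; masking down to the page base copies
	 * exactly the page that contains va, which is what the destination
	 * page in cow_user_page() is meant to hold.
	 */
	memcpy(dst, (const void *)(va & PAGE_MASK), PAGE_SIZE);

	printf("copied page starts with '%c'\n", dst[0]);	/* prints 'B' */
	free(buf);
	return 0;
}

In the kernel patch itself, a failed __copy_from_user_inatomic() simply falls back to zero-filling the destination page, as the added comment explains.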