From 235ae8a9f85092b967b70af731aa0494975de05d Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sun, 28 Aug 2005 16:49:11 +1000
Subject: [PATCH]

--- yaml ---
r: 6222
b: refs/heads/master
c: d992895ba2b27cf5adf1ba0ad6d27662adc54c5e
h: refs/heads/master
v: v3
---
 [refs]            |  2 +-
 trunk/mm/memory.c | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 540a73d390d0..1c4536c93f7f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 40193713df2cdb9c233b3fc2029ecdccb40cb1e4
+refs/heads/master: d992895ba2b27cf5adf1ba0ad6d27662adc54c5e

diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index e046b7e4b530..a596c1172248 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
 
+	/*
+	 * Don't copy ptes where a page fault will fill them correctly.
+	 * Fork becomes much lighter when there are big shared or private
+	 * readonly mappings. The tradeoff is that copy_page_range is more
+	 * efficient than faulting.
+	 */
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+		if (!vma->anon_vma)
+			return 0;
+	}
+
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
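
As an illustration of the case this patch optimizes (not part of the patch
itself), a minimal userspace sketch follows. A large, populated, read-only
shared mapping never gets an anon_vma and carries none of the excluded
vm_flags, so with this change copy_page_range() returns before walking any
page tables and fork() no longer pays a per-pte cost for it. MAP_LEN and
the timing scaffolding below are illustrative assumptions, not from the
patch.

/*
 * Illustrative sketch (hypothetical demo, not from this patch): time
 * fork() in a process holding a large, populated, read-only shared
 * mapping -- the kind of vma the patch stops copying ptes for.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define MAP_LEN (1UL << 30)	/* 1 GiB; size chosen for the demo */

int main(void)
{
	/*
	 * A shared anonymous mapping is shmem-backed, so the vma never
	 * gets an anon_vma; the patched fork() can skip its ptes.
	 */
	char *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, MAP_LEN);		/* populate the page tables */
	mprotect(p, MAP_LEN, PROT_READ);

	struct timeval t0, t1;
	gettimeofday(&t0, NULL);
	pid_t pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(0);		/* child: exit immediately */
	gettimeofday(&t1, NULL);
	waitpid(pid, NULL, 0);

	printf("fork() took %ld us\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000L +
	       (t1.tv_usec - t0.tv_usec));
	return 0;
}

On a kernel with this change the reported time should stay roughly flat as
MAP_LEN grows. Switching the mapping to MAP_PRIVATE while keeping the
memset attaches an anon_vma to the vma, so the pte copies come back and
fork() scales with the mapping size again.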