From 4f14f7ae1929e4263f492302514657bbd2d1e73e Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 15 Jan 2010 17:01:39 -0800
Subject: [PATCH]

--- yaml ---
r: 179363
b: refs/heads/master
c: 7e6608724c640924aad1d556d17df33ebaa6124d
h: refs/heads/master
i:
  179361: c8fbe2ca97423d52f3358806238994ad0c3dfa6c
  179359: 868644397c960e77719c868a0214af6da243e391
v: v3
---
 [refs]                      |  2 +-
 trunk/fs/ramfs/file-nommu.c | 31 +------------------
 trunk/include/linux/mm.h    |  1 +
 trunk/mm/nommu.c            | 62 +++++++++++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+), 31 deletions(-)

diff --git a/[refs] b/[refs]
index 4380982e4bc4..c904dcbd0596 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 81759b5b221107488bda99fe7beeb7b734f61133
+refs/heads/master: 7e6608724c640924aad1d556d17df33ebaa6124d
diff --git a/trunk/fs/ramfs/file-nommu.c b/trunk/fs/ramfs/file-nommu.c
index 266531343aae..1739a4aba25f 100644
--- a/trunk/fs/ramfs/file-nommu.c
+++ b/trunk/fs/ramfs/file-nommu.c
@@ -121,35 +121,6 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	return ret;
 }
 
-/*****************************************************************************/
-/*
- * check that file shrinkage doesn't leave any VMAs dangling in midair
- */
-static int ramfs_nommu_check_mappings(struct inode *inode,
-				      size_t newsize, size_t size)
-{
-	struct vm_area_struct *vma;
-	struct prio_tree_iter iter;
-
-	down_write(&nommu_region_sem);
-
-	/* search for VMAs that fall within the dead zone */
-	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
-			      newsize >> PAGE_SHIFT,
-			      (size + PAGE_SIZE - 1) >> PAGE_SHIFT
-			      ) {
-		/* found one - only interested if it's shared out of the page
-		 * cache */
-		if (vma->vm_flags & VM_SHARED) {
-			up_write(&nommu_region_sem);
-			return -ETXTBSY; /* not quite true, but near enough */
-		}
-	}
-
-	up_write(&nommu_region_sem);
-	return 0;
-}
-
 /*****************************************************************************/
 /*
  *
@@ -169,7 +140,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
 
 	/* check that a decrease in size doesn't cut off any shared mappings */
 	if (newsize < size) {
-		ret = ramfs_nommu_check_mappings(inode, newsize, size);
+		ret = nommu_shrink_inode_mappings(inode, size, newsize);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index 2265f28eb47a..60c467bfbabd 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -1089,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
diff --git a/trunk/mm/nommu.c b/trunk/mm/nommu.c
index 32be0cf51ba6..48a2ecfaf059 100644
--- a/trunk/mm/nommu.c
+++ b/trunk/mm/nommu.c
@@ -1914,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	mmput(mm);
 	return len;
 }
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken, and then shrink the
+ * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+				size_t newsize)
+{
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	struct vm_region *region;
+	pgoff_t low, high;
+	size_t r_size, r_top;
+
+	low = newsize >> PAGE_SHIFT;
+	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_write(&nommu_region_sem);
+
+	/* search for VMAs that fall within the dead zone */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      low, high) {
+		/* found one - only interested if it's shared out of the page
+		 * cache */
+		if (vma->vm_flags & VM_SHARED) {
+			up_write(&nommu_region_sem);
+			return -ETXTBSY; /* not quite true, but near enough */
+		}
+	}
+
+	/* reduce any regions that overlap the dead zone - if in existence,
+	 * these will be pointed to by VMAs that don't overlap the dead zone
+	 *
+	 * we don't check for any regions that start beyond the EOF as there
+	 * shouldn't be any
+	 */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      0, ULONG_MAX) {
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		region = vma->vm_region;
+		r_size = region->vm_top - region->vm_start;
+		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+		if (r_top > newsize) {
+			region->vm_top -= r_top - newsize;
+			if (region->vm_end > region->vm_top)
+				region->vm_end = region->vm_top;
+		}
+	}
+
+	up_write(&nommu_region_sem);
+	return 0;
+}
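
For reference, the following stand-alone user-space sketch walks through the same page-rounding and region-trimming arithmetic that the new nommu_shrink_inode_mappings() performs. It assumes the common 4 KiB page size; the EX_* macros, the example sizes and the local variables are illustrative stand-ins, not kernel APIs or part of the patch itself.

/* Illustration only: mirrors the dead-zone search bounds and the vm_region
 * trimming logic above, using plain integers instead of kernel structures. */
#include <stdio.h>

#define EX_PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
	unsigned long size    = 16384;          /* current file size (4 pages) */
	unsigned long newsize = 5000;           /* proposed, smaller file size */

	/* page index range searched for shared VMAs - the "dead zone" */
	unsigned long low  = newsize >> EX_PAGE_SHIFT;
	unsigned long high = (size + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
	printf("dead zone covers page indices %lu..%lu\n", low, high - 1);

	/* one shared region mapping the file from offset 0 for the old size */
	unsigned long vm_pgoff = 0;
	unsigned long vm_start = 0x100000;
	unsigned long vm_top   = vm_start + size;
	unsigned long vm_end   = vm_top;

	unsigned long r_size = vm_top - vm_start;
	unsigned long r_top  = (vm_pgoff << EX_PAGE_SHIFT) + r_size;

	if (r_top > newsize) {
		vm_top -= r_top - newsize;      /* trim the bytes past newsize */
		if (vm_end > vm_top)
			vm_end = vm_top;        /* keep vm_end inside the region */
	}
	printf("region trimmed from %lu to %lu bytes (vm_end at vm_start+%lu)\n",
	       r_size, vm_top - vm_start, vm_end - vm_start);
	return 0;
}

Built with a plain "cc sketch.c", this prints a dead zone of page indices 1..3 and a region trimmed from 16384 to 5000 bytes, which is what the second loop in the patch does to any shared vm_region whose top extends past the proposed EOF.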