From 3a05d9a7eece69d2c53a37dd308f2b4c20dd2751 Mon Sep 17 00:00:00 2001
From: Wu Fengguang
Date: Wed, 16 Sep 2009 11:50:14 +0200
Subject: [PATCH]

--- yaml ---
r: 165735
b: refs/heads/master
c: 6746aff74da293b5fd24e5c68b870b721e86cd5f
h: refs/heads/master
i:
  165733: 04fd0f3cd8b97ffd61f9f771dc0bde104c5eacad
  165731: 9ba918f29b7b19671be536bf64e2a35a8c85be48
  165727: 6dde4fd95a01b11bf23377287c482d529bb2cab6
v: v3
---
 [refs]                    | 2 +-
 trunk/mm/page-writeback.c | 7 +++++++
 trunk/mm/shmem.c          | 4 ++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 3ac4167991aa..9d83b5e9e749 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 257187362123f15d9d1e09918cf87cebbea4e786
+refs/heads/master: 6746aff74da293b5fd24e5c68b870b721e86cd5f
diff --git a/trunk/mm/page-writeback.c b/trunk/mm/page-writeback.c
index dd73d29c15a8..bba82c414ba8 100644
--- a/trunk/mm/page-writeback.c
+++ b/trunk/mm/page-writeback.c
@@ -1149,6 +1149,13 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
 /*
+ * Dirty a page.
+ *
+ * For pages with a mapping this should be done under the page lock
+ * for the benefit of asynchronous memory errors who prefer a consistent
+ * dirty state. This rule can be broken in some special cases,
+ * but should be better not to.
+ *
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c
index 5a0b3d4055f3..46936601e37f 100644
--- a/trunk/mm/shmem.c
+++ b/trunk/mm/shmem.c
@@ -1630,8 +1630,8 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 	if (pos + copied > inode->i_size)
 		i_size_write(inode, pos + copied);
 
-	unlock_page(page);
 	set_page_dirty(page);
+	unlock_page(page);
 	page_cache_release(page);
 
 	return copied;
@@ -1968,13 +1968,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 			iput(inode);
 			return error;
 		}
-		unlock_page(page);
 		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_op = &shmem_symlink_inode_operations;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
 		kunmap_atomic(kaddr, KM_USER0);
 		set_page_dirty(page);
+		unlock_page(page);
 		page_cache_release(page);
 	}
 	if (dir->i_mode & S_ISGID)
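
For illustration only, the sketch below (not part of the patch above) shows the ordering the two shmem hunks establish in a write_end-style path: set_page_dirty() is called while the page lock is still held, and the page is unlocked and released only afterwards, so asynchronous memory error handling observes a consistent dirty state under the page lock. The function name and its arguments are hypothetical.

/*
 * Illustrative sketch only -- not taken from this patch.  It mirrors
 * the corrected ordering: dirty the page under the page lock, then
 * unlock and release it.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_write_end(struct address_space *mapping,
			     struct page *page, loff_t pos, unsigned copied)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);		/* page lock still held here */
	unlock_page(page);		/* drop the lock only after dirtying */
	page_cache_release(page);

	return copied;
}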