From bcc81c12d0acdf928c25e6478b765730ad0f75b4 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Tue, 30 Jan 2007 14:36:27 +1100
Subject: [PATCH]

--- yaml ---
r: 45898
b: refs/heads/master
c: 87df7241bd547da5d4d4a4e5397866dfe422e439
h: refs/heads/master
v: v3
---
 [refs]            | 2 +-
 trunk/fs/buffer.c | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 27959b7079fa..39128bdae27c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4cbf2aa35e1c189db234190fefc6c83b139ef963
+refs/heads/master: 87df7241bd547da5d4d4a4e5397866dfe422e439

diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c
index 460f1c43238e..1ad674fd348c 100644
--- a/trunk/fs/buffer.c
+++ b/trunk/fs/buffer.c
@@ -2844,7 +2844,6 @@ int try_to_free_buffers(struct page *page)
 
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
-	spin_unlock(&mapping->private_lock);
 
 	/*
 	 * If the filesystem writes its buffers by hand (eg ext3)
@@ -2855,9 +2854,14 @@
 	 * Also, during truncate, discard_buffer will have marked all
 	 * the page's buffers clean. We discover that here and clean
 	 * the page also.
+	 *
+	 * private_lock must be held over this entire operation in order
+	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * dirty bit from being lost.
 	 */
 	if (ret)
 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
+	spin_unlock(&mapping->private_lock);
 out:
 	if (buffers_to_free) {
 		struct buffer_head *bh = buffers_to_free;
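
Note (not part of the patch): the race the new comment describes can be sketched
in userspace. The program below is a minimal pthread analogue, not kernel code;
private_lock, page_dirty, buffers_dirty, and has_buffers are illustrative
stand-ins for mapping->private_lock, the page dirty bit, the buffer dirty bits,
and buffer attachment, and plain ints stand in for what would really be atomic
flag operations. The key property it models: __set_page_dirty_buffers() dirties
buffers under private_lock but sets the page-level dirty bit only after dropping
it, so if try_to_free_buffers() releases the lock between drop_buffers() and
cancel_dirty_page(), a concurrent dirtier can set the page dirty in that window
and have the bit wiped.

	/*
	 * Userspace sketch of the lost-dirty-bit race; illustrative only.
	 * Build: cc -pthread -o race race.c
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t private_lock = PTHREAD_MUTEX_INITIALIZER;
	static int has_buffers   = 1;	/* page has buffer heads attached */
	static int buffers_dirty = 0;	/* some attached buffer is dirty */
	static int page_dirty    = 0;	/* the page's dirty bit */

	/* Rough analogue of __set_page_dirty_buffers(): buffers are dirtied
	 * under private_lock, but the page-level dirty bit is set after the
	 * lock is dropped. */
	static void *dirty_side(void *unused)
	{
		pthread_mutex_lock(&private_lock);
		if (has_buffers)
			buffers_dirty = 1;	/* set_buffer_dirty() on each bh */
		pthread_mutex_unlock(&private_lock);
		page_dirty = 1;			/* TestSetPageDirty() runs unlocked */
		return NULL;
	}

	/* Rough analogue of try_to_free_buffers() before this patch. */
	static void *free_side_buggy(void *unused)
	{
		int dropped = 0;

		pthread_mutex_lock(&private_lock);
		if (!buffers_dirty) {		/* drop_buffers() fails on dirty bh */
			has_buffers = 0;
			dropped = 1;
		}
		pthread_mutex_unlock(&private_lock);
		/*
		 * Race window: dirty_side() can take private_lock here, find
		 * no buffers, and set page_dirty = 1 -- which the line below
		 * then wipes out. That is the lost dirty bit.
		 */
		if (dropped)
			page_dirty = 0;		/* cancel_dirty_page() */
		return NULL;
	}

	/* With the patch applied: the cancel happens inside the same critical
	 * section as the drop, so dirty_side() either runs its locked section
	 * before it (buffers dirty, drop fails, no cancel) or after it (its
	 * page_dirty = 1 lands after the cancel and survives). */
	static void *free_side_fixed(void *unused)
	{
		pthread_mutex_lock(&private_lock);
		if (!buffers_dirty) {
			has_buffers = 0;
			page_dirty = 0;		/* cancel under private_lock */
		}
		pthread_mutex_unlock(&private_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, free_side_buggy, NULL);	/* or free_side_fixed */
		pthread_create(&b, NULL, dirty_side, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* With the buggy ordering, page_dirty can end up 0 even though
		 * the page was dirtied after its buffers were freed. */
		printf("page_dirty=%d buffers_dirty=%d has_buffers=%d\n",
		       page_dirty, buffers_dirty, has_buffers);
		return 0;
	}

Swapping free_side_buggy for free_side_fixed mirrors what the patch does: the
cancel moves inside the private_lock critical section, so the two sides are
strictly ordered and neither interleaving can clear a dirty bit that another
thread has a right to see preserved.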