Revert "fb_defio: fix for non-dirty ptes"
This reverts commit 49bbd81 ("fb_defio:
fix for non-dirty ptes").

Although the fix provided is correct, it has been suggested that, for
maintainability, the underlying race be avoided in the same way as is
currently done in filesystems such as NFS.

A follow-up patch, "fb_defio: redo fix for non-dirty ptes", will provide
such an alternative fix.

Signed-off-by: Albert Herranz <albert_herranz@yahoo.es>
Cc: Jaya Kumar <jayakumar.lkml@gmail.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
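
For context: the NFS-like avoidance referred to above amounts to keeping the faulted page locked across ->page_mkwrite() and signalling that to the fault path with VM_FAULT_LOCKED, so the deferred-I/O worker's lock_page()/page_mkclean() pair cannot run against the page before its PTE has actually been marked dirty. The sketch below is illustrative only and is not the follow-up patch itself; the list handling is simplified and the exact ->page_mkwrite() prototype varies with kernel version.

/* Illustrative sketch only -- simplified from fb_defio's real mkwrite handler. */
static int fb_deferred_io_mkwrite_sketch(struct vm_area_struct *vma,
                                         struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /*
         * Keep the page locked until the fault is fully resolved.  The
         * fault path only unlocks it after the PTE has been made writable
         * and dirty, so the worker's lock_page()/page_mkclean() pair can
         * no longer observe a still-clean PTE for this page.
         */
        lock_page(page);

        /*
         * Queue the page for the driver's deferred_io callback
         * (the real code keeps this list sorted and deduplicated).
         */
        mutex_lock(&fbdefio->lock);
        list_add_tail(&page->lru, &fbdefio->pagelist);
        mutex_unlock(&fbdefio->lock);

        /* come back after the configured delay to flush the pages */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);

        return VM_FAULT_LOCKED;
}

The point is simply that the fault path, not the driver, decides when the page lock is dropped, which closes the race window without the non_dirty bookkeeping removed by this revert.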
Albert Herranz authored and Linus Torvalds committed Jun 4, 2010
1 parent 1da083c commit 3f505ca
Showing 1 changed file with 8 additions and 32 deletions.
drivers/video/fb_defio.c

@@ -155,41 +155,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
         struct fb_info *info = container_of(work, struct fb_info,
                                                 deferred_work.work);
+        struct list_head *node, *next;
+        struct page *cur;
         struct fb_deferred_io *fbdefio = info->fbdefio;
-        struct page *page, *tmp_page;
-        struct list_head *node, *tmp_node;
-        struct list_head non_dirty;
-
-        INIT_LIST_HEAD(&non_dirty);
 
         /* here we mkclean the pages, then do all deferred IO */
         mutex_lock(&fbdefio->lock);
-        list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-                lock_page(page);
-                /*
-                 * The workqueue callback can be triggered after a
-                 * ->page_mkwrite() call but before the PTE has been marked
-                 * dirty. In this case page_mkclean() won't "rearm" the page.
-                 *
-                 * To avoid this, remove those "non-dirty" pages from the
-                 * pagelist before calling the driver's callback, then add
-                 * them back to get processed on the next work iteration.
-                 * At that time, their PTEs will hopefully be dirty for real.
-                 */
-                if (!page_mkclean(page))
-                        list_move_tail(&page->lru, &non_dirty);
-                unlock_page(page);
+        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+                lock_page(cur);
+                page_mkclean(cur);
+                unlock_page(cur);
         }
 
         /* driver's callback with pagelist */
         fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-        /* clear the list... */
-        list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+        /* clear the list */
+        list_for_each_safe(node, next, &fbdefio->pagelist) {
                 list_del(node);
         }
-        /* ... and add back the "non-dirty" pages to the list */
-        list_splice_tail(&non_dirty, &fbdefio->pagelist);
         mutex_unlock(&fbdefio->lock);
 }
 

@@ -218,21 +202,13 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
         struct fb_deferred_io *fbdefio = info->fbdefio;
-        struct list_head *node, *tmp_node;
         struct page *page;
         int i;
 
         BUG_ON(!fbdefio);
         cancel_delayed_work(&info->deferred_work);
         flush_scheduled_work();
 
-        /* the list may have still some non-dirty pages at this point */
-        mutex_lock(&fbdefio->lock);
-        list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-                list_del(node);
-        }
-        mutex_unlock(&fbdefio->lock);
-
         /* clear out the mapping that we setup */
         for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
                 page = fb_deferred_io_page(info, i);
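
As background for the hunks above, the pagelist passed to fbdefio->deferred_io() is the list that fb_deferred_io_work() assembles from pages touched through the mmap'ed framebuffer. A driver wires itself into defio roughly as sketched here; the example_* names, the delay value, and the callback body are hypothetical, while the struct fb_deferred_io fields and the fb_deferred_io_init()/fb_deferred_io_cleanup() calls are the real interface.

#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm_types.h>

/* Hypothetical flush callback: push each collected page to the device. */
static void example_fb_dpy_deferred_io(struct fb_info *info,
                                       struct list_head *pagelist)
{
        struct page *page;

        list_for_each_entry(page, pagelist, lru) {
                /*
                 * Transfer PAGE_SIZE bytes at offset page->index << PAGE_SHIFT
                 * from info->screen_base to the hardware here.
                 */
        }
}

static struct fb_deferred_io example_fb_defio = {
        .delay          = HZ / 20,      /* batch writes for ~50 ms */
        .deferred_io    = example_fb_dpy_deferred_io,
};

/*
 * In the driver's probe():
 *         info->fbdefio = &example_fb_defio;
 *         fb_deferred_io_init(info);
 * and fb_deferred_io_cleanup(info) on remove, as in the second hunk above.
 */

With that in place, every write fault on the framebuffer mapping funnels through the driver-side mkwrite path, and the delayed work shown in the first hunk batches the touched pages before handing them to the driver's callback.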
