Commit 32169d5

---
yaml
---
r: 199816
b: refs/heads/master
c: 3f505ca
h: refs/heads/master
v: v3
Albert Herranz authored and Linus Torvalds committed Jun 4, 2010
1 parent 2e14b00 commit 32169d5
Showing 2 changed files with 9 additions and 33 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1da083c9b23dafd6bcb08dcfec443e66e90efff0
+refs/heads/master: 3f505ca45735c35576dab4ceb3e3736d528b6672
40 changes: 8 additions & 32 deletions trunk/drivers/video/fb_defio.c
@@ -155,41 +155,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
+	struct list_head *node, *next;
+	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page, *tmp_page;
-	struct list_head *node, *tmp_node;
-	struct list_head non_dirty;
-
-	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-		lock_page(page);
-		/*
-		 * The workqueue callback can be triggered after a
-		 * ->page_mkwrite() call but before the PTE has been marked
-		 * dirty. In this case page_mkclean() won't "rearm" the page.
-		 *
-		 * To avoid this, remove those "non-dirty" pages from the
-		 * pagelist before calling the driver's callback, then add
-		 * them back to get processed on the next work iteration.
-		 * At that time, their PTEs will hopefully be dirty for real.
-		 */
-		if (!page_mkclean(page))
-			list_move_tail(&page->lru, &non_dirty);
-		unlock_page(page);
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		lock_page(cur);
+		page_mkclean(cur);
+		unlock_page(cur);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list... */
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+	/* clear the list */
+	list_for_each_safe(node, next, &fbdefio->pagelist) {
 		list_del(node);
 	}
-	/* ... and add back the "non-dirty" pages to the list */
-	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }

@@ -218,21 +202,13 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
 	BUG_ON(!fbdefio);
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
-	/* the list may have still some non-dirty pages at this point */
-	mutex_lock(&fbdefio->lock);
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-		list_del(node);
-	}
-	mutex_unlock(&fbdefio->lock);
-
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
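
For context (not part of this commit): a rough sketch of how a framebuffer driver typically plugs into the deferred I/O path that fb_deferred_io_work() serves. The driver fills in a struct fb_deferred_io with a flush delay and a deferred_io callback, which the work function above invokes with the list of written pages, and brackets its lifetime with fb_deferred_io_init()/fb_deferred_io_cleanup(). The mydrv_* names and the per-page write helper are hypothetical placeholders, not kernel API.

/*
 * Hypothetical driver-side sketch (not from this commit).
 */
#include <linux/fb.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Assumed helper: in a real driver this would copy PAGE_SIZE bytes of the
 * shadow framebuffer at 'offset' out to the device (USB/SPI/I2C/...). */
static void mydrv_write_page(struct fb_info *info, unsigned long offset)
{
}

/* Called from fb_deferred_io_work() with the pages touched since the last
 * run; page->index is the page's offset within the framebuffer memory. */
static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	list_for_each_entry(page, pagelist, lru)
		mydrv_write_page(info, page->index << PAGE_SHIFT);
}

static struct fb_deferred_io mydrv_defio = {
	.delay		= HZ / 10,	/* coalesce writes for ~100 ms */
	.deferred_io	= mydrv_deferred_io,
};

static void mydrv_start_defio(struct fb_info *info)
{
	/* Sets up fbdefio->lock, the pagelist and the delayed work that
	 * eventually runs fb_deferred_io_work(). */
	info->fbdefio = &mydrv_defio;
	fb_deferred_io_init(info);
}

static void mydrv_stop_defio(struct fb_info *info)
{
	/* Cancels the delayed work and clears the page mappings, as shown
	 * in the fb_deferred_io_cleanup() hunk above. */
	fb_deferred_io_cleanup(info);
}

With this shape, the reverted code only changed which pages the core handed to mydrv_deferred_io() in a given pass, not the driver-side contract.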
