Skip to content

Commit

Permalink
[PATCH] splice: fixup writeout path after ->map changes
Browse files Browse the repository at this point in the history
Since ->map() no longer locks the page, we need to adjust the handling
of those pages (and stealing) a little. This now passes full regressions
again.

Signed-off-by: Jens Axboe <axboe@suse.de>
  • Loading branch information
Jens Axboe committed Apr 19, 2006
1 parent a4514eb commit 9e0267c
Showing 1 changed file with 30 additions and 19 deletions.
49 changes: 30 additions & 19 deletions fs/splice.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
struct page *page = buf->page;
struct address_space *mapping = page_mapping(page);

WARN_ON(!PageLocked(page));
lock_page(page);

WARN_ON(!PageUptodate(page));

/*
Expand All @@ -65,8 +66,10 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
if (PagePrivate(page))
try_to_release_page(page, mapping_gfp_mask(mapping));

if (!remove_mapping(mapping, page))
if (!remove_mapping(mapping, page)) {
unlock_page(page);
return 1;
}

buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
return 0;
Expand Down Expand Up @@ -507,14 +510,12 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
if (sd->flags & SPLICE_F_MOVE) {
/*
* If steal succeeds, buf->page is now pruned from the vm
* side (LRU and page cache) and we can reuse it.
* side (LRU and page cache) and we can reuse it. The page
* will also be locked on successful return.
*/
if (buf->ops->steal(info, buf))
goto find_page;

/*
* this will also set the page locked
*/
page = buf->page;
if (add_to_page_cache(page, mapping, index, gfp_mask))
goto find_page;
Expand All @@ -523,15 +524,27 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
lru_cache_add(page);
} else {
find_page:
ret = -ENOMEM;
page = find_or_create_page(mapping, index, gfp_mask);
if (!page)
goto out_nomem;
page = find_lock_page(mapping, index);
if (!page) {
ret = -ENOMEM;
page = page_cache_alloc_cold(mapping);
if (unlikely(!page))
goto out_nomem;

/*
* This will also lock the page
*/
ret = add_to_page_cache_lru(page, mapping, index,
gfp_mask);
if (unlikely(ret))
goto out;
}

/*
* If the page is uptodate, it is also locked. If it isn't
* uptodate, we can mark it uptodate if we are filling the
* full page. Otherwise we need to read it in first...
* We get here with the page locked. If the page is also
* uptodate, we don't need to do more. If it isn't, we
* may need to bring it in if we are not going to overwrite
* the full page.
*/
if (!PageUptodate(page)) {
if (sd->len < PAGE_CACHE_SIZE) {
Expand All @@ -553,10 +566,8 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
ret = -EIO;
goto out;
}
} else {
WARN_ON(!PageLocked(page));
} else
SetPageUptodate(page);
}
}
}

Expand Down Expand Up @@ -585,10 +596,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
mark_page_accessed(page);
balance_dirty_pages_ratelimited(mapping);
out:
if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
page_cache_release(page);
unlock_page(page);
}

unlock_page(page);
out_nomem:
buf->ops->unmap(info, buf);
return ret;
Expand Down

0 comments on commit 9e0267c

Please sign in to comment.