Commit

---
r: 83052
b: refs/heads/master
c: 5402b97
h: refs/heads/master
v: v3
Hugh Dickins authored and Linus Torvalds committed Feb 5, 2008
1 parent 892bbec commit 4a4ebec
Showing 2 changed files with 4 additions and 107 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d3602444e1e3485890eea5f61366e19a287c00c4
+refs/heads/master: 5402b976ae0be96b3a32f3508ab7308c380d6477
109 changes: 3 additions & 106 deletions trunk/mm/shmem.c
@@ -1106,7 +1106,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
 	 * in under swappage, which is then assigned to filepage.
-	 * But shmem_readpage and shmem_write_begin pass in a locked
+	 * But shmem_readpage (required for splice) passes in a locked
 	 * filepage, which may be found not uptodate by other callers
 	 * too, and may need to be copied from the swappage read in.
 	 */
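The updated comment singles out shmem_readpage as the one remaining caller that hands shmem_getpage a pre-locked filepage; it is the readpage implementation that splice_read relies on. For orientation, this is roughly what that caller looks like in this era of the tree (a sketch reconstructed from the 2.6.24 sources, not part of this diff; details may differ):

static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	/* page arrives locked; shmem_getpage brings it uptodate,
	 * copying from swap if the data is out on swappage */
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}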
@@ -1476,110 +1476,6 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 	return copied;
 }
 
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	struct inode	*inode = file->f_path.dentry->d_inode;
-	loff_t		pos;
-	unsigned long	written;
-	ssize_t		err;
-
-	if ((ssize_t) count < 0)
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_READ, buf, count))
-		return -EFAULT;
-
-	mutex_lock(&inode->i_mutex);
-
-	pos = *ppos;
-	written = 0;
-
-	err = generic_write_checks(file, &pos, &count, 0);
-	if (err || !count)
-		goto out;
-
-	err = remove_suid(file->f_path.dentry);
-	if (err)
-		goto out;
-
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
-	do {
-		struct page *page = NULL;
-		unsigned long bytes, index, offset;
-		char *kaddr;
-		int left;
-
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		index = pos >> PAGE_CACHE_SHIFT;
-		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
-
-		/*
-		 * We don't hold page lock across copy from user -
-		 * what would it guard against? - so no deadlock here.
-		 * But it still may be a good idea to prefault below.
-		 */
-
-		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
-		if (err)
-			break;
-
-		unlock_page(page);
-		left = bytes;
-		if (PageHighMem(page)) {
-			volatile unsigned char dummy;
-			__get_user(dummy, buf);
-			__get_user(dummy, buf + bytes - 1);
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			left = __copy_from_user_inatomic(kaddr + offset,
-							buf, bytes);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if (left) {
-			kaddr = kmap(page);
-			left = __copy_from_user(kaddr + offset, buf, bytes);
-			kunmap(page);
-		}
-
-		written += bytes;
-		count -= bytes;
-		pos += bytes;
-		buf += bytes;
-		if (pos > inode->i_size)
-			i_size_write(inode, pos);
-
-		flush_dcache_page(page);
-		set_page_dirty(page);
-		mark_page_accessed(page);
-		page_cache_release(page);
-
-		if (left) {
-			pos -= left;
-			written -= left;
-			err = -EFAULT;
-			break;
-		}
-
-		/*
-		 * Our dirty pages are not counted in nr_dirty,
-		 * and we do not attempt to balance dirty pages.
-		 */
-
-		cond_resched();
-	} while (count);
-
-	*ppos = pos;
-	if (written)
-		err = written;
-out:
-	mutex_unlock(&inode->i_mutex);
-	return err;
-}
-
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
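With shmem_file_write gone, ordinary writes reach shmem_getpage through the generic buffered-write path via the shmem_write_begin/shmem_write_end address_space operations (shmem_write_end is visible as context at the top of the hunk above). The key point is that write_begin no longer needs to hand shmem_getpage a pre-locked page; it just asks shmem_getpage to find or allocate the right one, even if the data currently sits in swapcache. A sketch of that helper as it stands around 2.6.24 (reconstructed from memory, not part of this commit):

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	/* Pass NULL: shmem_getpage locates or allocates the page itself,
	 * handling the swappage case that the old special write path
	 * had to deal with by hand. */
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}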
@@ -2354,7 +2250,8 @@ static const struct file_operations shmem_file_operations = {
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
 	.read		= shmem_file_read,
-	.write		= shmem_file_write,
+	.write		= do_sync_write,
+	.aio_write	= generic_file_aio_write,
 	.fsync		= simple_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
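The new .write hook is not tmpfs-specific code at all: do_sync_write is the VFS helper in fs/read_write.c that wraps a plain write(2) into a synchronous kiocb and forwards it to the .aio_write method, here generic_file_aio_write. In outline, and simplified (the retry loop for -EIOCBRETRY is omitted; this is a sketch of the 2.6.24-era helper, not part of this commit):

ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);	/* synchronous: no real aio context */
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}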
