Commit 7813688

---
r: 28977
b: refs/heads/master
c: c22ce14
h: refs/heads/master
i:
  28975: 252ef28
v: v3
Hiro Yoshioka authored and Linus Torvalds committed Jun 23, 2006
1 parent 80b61f0 commit 7813688
Showing 6 changed files with 190 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 7dbdf43cfa635ddc3701cc8d1eab07597cd731c0
refs/heads/master: c22ce143d15eb288543fe9873e1c5ac1c01b69a1
137 changes: 129 additions & 8 deletions trunk/arch/i386/lib/usercopy.c
@@ -425,15 +425,121 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
: "eax", "edx", "memory");
return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel. It is cache aware:
 * the destination is written with movnti (non-temporal) stores, so the copied
 * data is not pulled into the CPU cache and does not evict existing lines.
 * hyoshiok@miraclelinux.com
 */

static unsigned long __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;

__asm__ __volatile__(
" .align 2,0x90\n"
"0: movl 32(%4), %%eax\n"
" cmpl $67, %0\n"
" jbe 2f\n"
"1: movl 64(%4), %%eax\n"
" .align 2,0x90\n"
"2: movl 0(%4), %%eax\n"
"21: movl 4(%4), %%edx\n"
" movnti %%eax, 0(%3)\n"
" movnti %%edx, 4(%3)\n"
"3: movl 8(%4), %%eax\n"
"31: movl 12(%4),%%edx\n"
" movnti %%eax, 8(%3)\n"
" movnti %%edx, 12(%3)\n"
"4: movl 16(%4), %%eax\n"
"41: movl 20(%4), %%edx\n"
" movnti %%eax, 16(%3)\n"
" movnti %%edx, 20(%3)\n"
"10: movl 24(%4), %%eax\n"
"51: movl 28(%4), %%edx\n"
" movnti %%eax, 24(%3)\n"
" movnti %%edx, 28(%3)\n"
"11: movl 32(%4), %%eax\n"
"61: movl 36(%4), %%edx\n"
" movnti %%eax, 32(%3)\n"
" movnti %%edx, 36(%3)\n"
"12: movl 40(%4), %%eax\n"
"71: movl 44(%4), %%edx\n"
" movnti %%eax, 40(%3)\n"
" movnti %%edx, 44(%3)\n"
"13: movl 48(%4), %%eax\n"
"81: movl 52(%4), %%edx\n"
" movnti %%eax, 48(%3)\n"
" movnti %%edx, 52(%3)\n"
"14: movl 56(%4), %%eax\n"
"91: movl 60(%4), %%edx\n"
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
" addl $64, %4\n"
" addl $64, %3\n"
" cmpl $63, %0\n"
" ja 0b\n"
" sfence \n"
"5: movl %0, %%eax\n"
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
"6: rep; movsl\n"
" movl %%eax,%0\n"
"7: rep; movsb\n"
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
"16: pushl %0\n"
" pushl %%eax\n"
" xorl %%eax,%%eax\n"
" rep; stosb\n"
" popl %%eax\n"
" popl %0\n"
" jmp 8b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,16b\n"
" .long 1b,16b\n"
" .long 2b,16b\n"
" .long 21b,16b\n"
" .long 3b,16b\n"
" .long 31b,16b\n"
" .long 4b,16b\n"
" .long 41b,16b\n"
" .long 10b,16b\n"
" .long 51b,16b\n"
" .long 11b,16b\n"
" .long 61b,16b\n"
" .long 12b,16b\n"
" .long 71b,16b\n"
" .long 13b,16b\n"
" .long 81b,16b\n"
" .long 14b,16b\n"
" .long 91b,16b\n"
" .long 6b,9b\n"
" .long 7b,16b\n"
".previous"
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
return size;
}
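
In outline, the routine above streams 64-byte blocks with paired movl loads and
movnti stores until fewer than 64 bytes remain, then finishes the tail with
"rep; movsl" / "rep; movsb"; on a fault, the fixup at label 16 zero-fills the
uncopied part of the destination and returns the number of bytes left. The
control flow as a plain C sketch (illustrative only; no non-temporal stores and
no fault handling, memcpy stands in for the block copies):

#include <string.h>

/* C outline of the copy loop above; not part of the file. */
static unsigned long nocache_copy_outline(void *to, const void *from,
                                          unsigned long size)
{
        char *d = to;
        const char *s = from;

        while (size > 63) {             /* "cmpl $63, %0; ja 0b" */
                memcpy(d, s, 64);       /* eight movl/movnti pairs in the asm */
                d += 64;
                s += 64;
                size -= 64;
        }
        memcpy(d, s, size);             /* "rep; movsl" then "rep; movsb" */
        return 0;                       /* the asm returns bytes NOT copied */
}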

#else

/*
 * Leave these declared but undefined. There should not be any
 * references to them.
 */
unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size);
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
@@ -515,8 +621,8 @@ do { \
: "memory"); \
} while (0)


unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
unsigned long __copy_to_user_ll(void __user *to, const void *from,
unsigned long n)
{
BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
@@ -576,8 +682,8 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
unsigned long __copy_from_user_ll(void *to, const void __user *from,
unsigned long n)
{
BUG_ON((long)n < 0);
if (movsl_is_ok(to, from, n))
@@ -588,6 +694,21 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
                                          unsigned long n)
{
        BUG_ON((long)n < 0);
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_zeroing_intel_nocache(to, from, n);
        else
                __copy_user_zeroing(to, from, n);
#else
        __copy_user_zeroing(to, from, n);
#endif
        return n;
}

/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
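
Each ".long Nb,16b" pair in the __ex_table section above is an exception-table
entry: if the load at local label N takes a page fault, the fault handler looks
up the faulting address in the table and resumes execution at the fixup label,
which zero-fills whatever could not be copied. A stripped-down instance of the
same pattern, reduced to a single 4-byte user load (sketch only; the helper
name and the literal -14 standing in for -EFAULT are illustrative):

/* Sketch: one faulting user-space load protected by an __ex_table entry. */
static int get_user_word_sketch(unsigned int *dst, const void __user *from)
{
        int err = 0;

        __asm__ __volatile__(
                "1:     movl (%2), %%eax\n"     /* may fault on a bad user address */
                "       movl %%eax, (%1)\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     movl $-14, %0\n"        /* fault path: err = -EFAULT */
                "       jmp 2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .align 4\n"
                "       .long 1b,3b\n"          /* faulting insn -> fixup label */
                ".previous"
                : "+r" (err)
                : "r" (dst), "r" (from)
                : "eax", "memory");

        return err;
}
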
33 changes: 33 additions & 0 deletions trunk/include/asm-i386/uaccess.h
@@ -390,6 +390,8 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll(void *to,
const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache(void *to,
const void __user *from, unsigned long n);

/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -478,12 +480,43 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}

#define ARCH_HAS_NOCACHE_UACCESS

static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_sleep();
        return __copy_from_user_inatomic(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
{
        might_sleep();
        return __copy_from_user_inatomic_nocache(to, from, n);
}

unsigned long __must_check copy_to_user(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
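
As a usage illustration (not from the commit): a hypothetical kernel path that
copies a small fixed-size header followed by a large, write-once payload could
combine the cached and non-cached helpers like this, relying on the
constant-size fast path for the header. struct req_hdr, read_request() and the
buffer layout are invented for the example; the usual kernel headers
(linux/types.h, linux/errno.h) are assumed.

struct req_hdr {
        u32 len;
};

static int read_request(void *payload, unsigned long payload_max,
                        const void __user *ubuf)
{
        struct req_hdr hdr;

        /* constant 4-byte size: inlined to a single __get_user_size() */
        if (__copy_from_user(&hdr, ubuf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.len > payload_max)
                return -EINVAL;

        /* large buffer that is written once and not re-read by the CPU soon:
           worth keeping out of the cache */
        if (__copy_from_user_nocache(payload,
                        (const char __user *)ubuf + sizeof(hdr), hdr.len))
                return -EFAULT;

        return 0;
}
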
22 changes: 22 additions & 0 deletions trunk/include/linux/uaccess.h
@@ -0,0 +1,22 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <asm/uaccess.h>

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

#endif /* __LINUX_UACCESS_H__ */
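
On architectures that do not define ARCH_HAS_NOCACHE_UACCESS, these fallbacks
turn the _nocache names into ordinary cached copies, so common code such as
mm/filemap.c below can call them unconditionally. An architecture adding real
support would define the macro and both helpers in its own asm/uaccess.h,
roughly as follows (sketch; arch_nocache_copy_from_user() is a hypothetical
arch-internal helper):

/* asm-<arch>/uaccess.h (sketch) */
#define ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return arch_nocache_copy_from_user(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        might_sleep();
        return arch_nocache_copy_from_user(to, from, n);
}
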
4 changes: 2 additions & 2 deletions trunk/mm/filemap.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
@@ -38,7 +39,6 @@
*/
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
@@ -1902,7 +1902,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
int copy = min(bytes, iov->iov_len - base);

base = 0;
left = __copy_from_user_inatomic(vaddr, buf, copy);
left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
6 changes: 3 additions & 3 deletions trunk/mm/filemap.h
@@ -13,7 +13,7 @@
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/uaccess.h>

size_t
__filemap_copy_from_user_iovec(char *vaddr,
@@ -34,13 +34,13 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
int left;

kaddr = kmap_atomic(page, KM_USER0);
left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);

if (left != 0) {
/* Do it the slow way */
kaddr = kmap(page);
left = __copy_from_user(kaddr + offset, buf, bytes);
left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
kunmap(page);
}
return bytes - left;
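
The payoff in this write path is that data copied into the page cache is
typically written out by the device and not read again by the CPU soon, so
pulling it through the cache only evicts lines that are still useful. The same
movnti/sfence idea is available from ordinary C through SSE2 intrinsics
(user-space illustration only, not part of the commit):

#include <xmmintrin.h>          /* _mm_sfence */
#include <emmintrin.h>          /* SSE2: _mm_stream_si32 */
#include <stddef.h>

/* Illustrative non-temporal copy of 32-bit words, analogous to the
   movnti loop in __copy_user_zeroing_intel_nocache(). */
static void copy_words_nocache(int *dst, const int *src, size_t nwords)
{
        size_t i;

        for (i = 0; i < nwords; i++)
                _mm_stream_si32(&dst[i], src[i]);  /* store bypasses the cache */

        _mm_sfence();   /* order the streaming stores, like the sfence above */
}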
