Skip to content

Commit

Permalink
s390/uaccess: get rid of indirect function calls
Browse files Browse the repository at this point in the history
There are only two uaccess variants on s390 left: the version that is used
if the mvcos instruction is available, and the page table walk variant.
So there is no need for expensive indirect function calls.

By default the mvcos variant will be called. If the mvcos instruction is not
available it will call the page table walk variant.

For minimal performance impact, the "if (mvcos_is_available)" check is implemented
with a jump label, which will be a six-byte nop on machines with mvcos.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
  • Loading branch information
Heiko Carstens authored and Martin Schwidefsky committed Feb 21, 2014
1 parent cfa785e commit 4f41c2b
Show file tree
Hide file tree
Showing 7 changed files with 152 additions and 149 deletions.
13 changes: 5 additions & 8 deletions arch/s390/include/asm/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,10 @@
#include <linux/uaccess.h>
#include <asm/errno.h>

static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
Expand All @@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
oparg = 1 << oparg;

pagefault_disable();
ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
pagefault_enable();

if (!ret) {
Expand All @@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}

/*
 * Atomically compare-and-exchange a u32 in user space.
 * Stores the value previously found at @uaddr into *uval; returns 0 on
 * success or a negative error code from the underlying implementation.
 * NOTE(review): pre-commit wrapper — dispatches through the uaccess ops
 * table; the commit replaces it with a direct extern declaration.
 */
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
}

#endif /* _ASM_S390_FUTEX_H */
148 changes: 61 additions & 87 deletions arch/s390/include/asm/uaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct uaccess_ops {
size_t (*copy_from_user)(void *, const void __user *, size_t);
size_t (*copy_to_user)(void __user *, const void *, size_t);
size_t (*copy_in_user)(void __user *, const void __user *, size_t);
size_t (*clear_user)(void __user *, size_t);
size_t (*strnlen_user)(const char __user *, size_t);
size_t (*strncpy_from_user)(char *, const char __user *, size_t);
int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
};
int __handle_fault(unsigned long, unsigned long, int);

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_pt;
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
size_t __must_check __copy_from_user(void *to, const void __user *from,
size_t n);

/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
unsigned long n);

extern int __handle_fault(unsigned long, unsigned long, int);
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * Copy @size bytes from kernel pointer @x to user pointer @ptr.
 * Returns 0 on success or -EFAULT if any bytes could not be copied.
 * (Diff-render artifact fixed: only the post-commit statements are kept;
 * the old uaccess.copy_to_user indirection was interleaved with them.)
 */
static inline int __put_user_fn(void *x, void __user *ptr, size_t size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

/*
 * Copy @size bytes from user pointer @ptr to kernel pointer @x.
 * Returns 0 on success or -EFAULT if any bytes could not be copied.
 * (Diff-render artifact fixed: only the post-commit statements are kept;
 * the old uaccess.copy_from_user indirection was interleaved with them.)
 */
static inline int __get_user_fn(void *x, const void __user *ptr, size_t size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

/*
Expand Down Expand Up @@ -152,7 +177,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, size_t size)
})


extern int __put_user_bad(void) __attribute__((noreturn));
int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \
Expand Down Expand Up @@ -200,34 +225,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
__get_user(x, ptr); \
})

extern int __get_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
return uaccess.copy_to_user(to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
Expand All @@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user(to, from, n);
}

/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
return uaccess.copy_from_user(to, from, n);
}

extern void copy_from_user_overflow(void)
void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
Expand Down Expand Up @@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
return __copy_from_user(to, from, n);
}

/*
 * Copy @n bytes between two user-space buffers without intermediate
 * kernel buffering. Returns the number of bytes that could not be
 * copied (0 on success). Post-commit form: an out-of-line function
 * replaces the old static inline that dispatched through uaccess ops.
 */
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
Expand All @@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
/*
* Copy a null terminated string from userspace.
*/

long __strncpy_from_user(char *dst, const char __user *src, long count);

/*
 * Copy a NUL-terminated string of at most @count bytes from user space.
 * May sleep (might_fault). Returns the length of the copied string or a
 * negative error code. (Duplicate old-return diff line removed; only the
 * direct __strncpy_from_user call remains.)
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

static inline unsigned long
strnlen_user(const char __user * src, unsigned long n)
size_t __must_check __strnlen_user(const char __user *src, size_t count);

static inline size_t strnlen_user(const char __user *src, size_t n)
{
might_fault();
return uaccess.strnlen_user(src, n);
return __strnlen_user(src, n);
}

/**
Expand All @@ -355,21 +335,15 @@ strnlen_user(const char __user * src, unsigned long n)
/*
* Zero Userspace
*/
/* Out-of-line worker; zeroes @size bytes, returns bytes NOT cleared. */
size_t __must_check __clear_user(void __user *to, size_t size);

/*
 * Zero @n bytes of user-space memory. May sleep (might_fault).
 * Returns the number of bytes that could not be cleared (0 on success).
 * (Diff-render artifact fixed: the removed static inline __clear_user
 * and the old uaccess.clear_user return were interleaved here; only the
 * post-commit version is kept.)
 */
static inline size_t __must_check clear_user(void __user *to, size_t n)
{
	might_fault();
	return __clear_user(to, n);
}

extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
int copy_to_user_real(void __user *dest, void *src, size_t count);
int copy_from_user_real(void *dest, void __user *src, size_t count);

#endif /* __S390_UACCESS_H */
9 changes: 0 additions & 9 deletions arch/s390/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@
#include <linux/compat.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
Expand All @@ -64,12 +63,6 @@
#include <asm/sclp.h>
#include "entry.h"

/*
* User copy operations.
*/
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
* Machine setup..
*/
Expand Down Expand Up @@ -1009,8 +1002,6 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;

uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;

parse_early_param();
detect_memory_layout(memory_chunk, memory_end);
os_info_init();
Expand Down
3 changes: 1 addition & 2 deletions arch/s390/lib/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@
# Makefile for s390-specific library files..
#

lib-y += delay.o string.o uaccess_pt.o find.o
lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
8 changes: 6 additions & 2 deletions arch/s390/lib/uaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,11 @@
#ifndef __ARCH_S390_LIB_UACCESS_H
#define __ARCH_S390_LIB_UACCESS_H

/*
 * Page-table-walk uaccess fallback primitives, used when the mvcos
 * instruction is unavailable. Each returns the number of bytes NOT
 * processed (0 on full success), except strnlen/strncpy which follow
 * their usual length-return conventions.
 * (Diff-render artifact fixed: the removed futex_atomic_*_pt externs
 * were still shown alongside the added declarations; hunk header
 * "-6,7 +6,11" confirms they were replaced.)
 */
size_t copy_from_user_pt(void *to, const void __user *from, size_t n);
size_t copy_to_user_pt(void __user *to, const void *from, size_t n);
size_t copy_in_user_pt(void __user *to, const void __user *from, size_t n);
size_t clear_user_pt(void __user *to, size_t n);
size_t strnlen_user_pt(const char __user *src, size_t count);
size_t strncpy_from_user_pt(char *dst, const char __user *src, size_t count);

#endif /* __ARCH_S390_LIB_UACCESS_H */
Loading

0 comments on commit 4f41c2b

Please sign in to comment.