Merge of master.kernel.org:/home/rmk/linux-2.6-rmk.git
Linus Torvalds committed Apr 29, 2005
2 parents a879cbb + 53e173f commit dd96a8e
Showing 11 changed files with 454 additions and 41 deletions.
2 changes: 1 addition & 1 deletion arch/arm/configs/ixdp2800_defconfig
@@ -133,7 +133,7 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0 pci=firmware"
CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0"
# CONFIG_XIP_KERNEL is not set

#
213 changes: 212 additions & 1 deletion arch/arm/kernel/entry-armv.S
@@ -269,6 +269,12 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr

#if __LINUX_ARM_ARCH__ < 6
@ make sure our user space atomic helper is aborted
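@ (r2 holds the interrupted user PC: if it is at or above
@ VIRT_OFFSET the code was running in kernel-owned space, i.e.
@ possibly inside the cmpxchg helper in the vector page, so Z is
@ cleared in the saved PSR to make its conditional store fail)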
cmp r2, #VIRT_OFFSET
bichs r3, r3, #PSR_Z_BIT
#endif

@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
mra r4, r5, acc0
stmia ip, {r4, r5}
#endif
#ifdef CONFIG_HAS_TLS_REG
mcr p15, 0, r3, c13, c0, 3 @ set TLS register
#else
mov r4, #0xffff0fff
str r3, [r4, #-3] @ Set TLS ptr
str r3, [r4, #-15] @ TLS val at 0xffff0ff0
#endif
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@@ -519,6 +529,207 @@ ENTRY(__switch_to)
ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously

__INIT

/*
* User helpers.
*
* These are segments of kernel-provided user code reachable from user space
* at a fixed address in kernel memory. They are used to provide user space
* with some operations which require kernel help because of unimplemented
* native features and/or instructions in many ARM CPUs. The idea is for
* this code to be executed directly in user mode for best efficiency, but
* it is too intimate with the kernel counterpart to be left to user
* libraries. In fact this code might even differ from one CPU to another
* depending on the available instruction set, or on restrictions such as
* those found on SMP systems. In other words, the kernel reserves the
* right to change this code as needed without warning. Only the entry
* points and their results are guaranteed to be stable.
*
* Each segment is 32-byte aligned and will be moved to the top of the high
* vector page. New segments (if ever needed) must be added in front of
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
*
* User space is expected to implement these operations inline when
* optimizing for a processor that has the necessary native support, but
* only if the resulting binaries are already going to be incompatible with
* earlier ARM processors due to the use of other unsupported instructions.
* In other words, don't make binaries unable to run on earlier processors
* just for the sake of avoiding these kernel helpers if your compiled code
* is not going to use the new instructions for any other purpose.
*/
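
/*
* Given the 32-byte alignment and the top-of-page placement described
* above, the entry point of helper number n (counting down from the top,
* a numbering used here for illustration only) could be computed as:
*
*	#define __kuser_helper_addr(n) (0xffff0fe0 - (n) * 32)
*
* which gives 0xffff0fe0 for get_tls (n = 0) and 0xffff0fc0 for
* cmpxchg (n = 1), matching the addresses documented below.
*/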

.align 5
.globl __kuser_helper_start
__kuser_helper_start:

/*
* Reference prototype:
*
* int __kernel_cmpxchg(int oldval, int newval, int *ptr)
*
* Input:
*
* r0 = oldval
* r1 = newval
* r2 = ptr
* lr = return address
*
* Output:
*
* r0 = returned value (zero or non-zero)
* C flag = set if r0 == 0, clear if r0 != 0
*
* Clobbered:
*
* r3, ip, flags
*
* Definition and user space usage example:
*
* typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
* #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
*
* Atomically store newval in *ptr on behalf of user space if *ptr is equal
* to oldval. Return zero if *ptr was changed, or non-zero if no exchange
* happened. The C flag is also set if *ptr was changed to allow for
* assembly optimization in the calling code.
*
* For example, a user space atomic_add implementation could look like this:
*
* #define atomic_add(ptr, val) \
* ({ register unsigned int *__ptr asm("r2") = (ptr); \
* register unsigned int __result asm("r1"); \
* asm volatile ( \
* "1: @ atomic_add\n\t" \
* "ldr r0, [r2]\n\t" \
* "mov r3, #0xffff0fff\n\t" \
* "add lr, pc, #4\n\t" \
* "add r1, r0, %2\n\t" \
* "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
* "bcc 1b" \
* : "=&r" (__result) \
* : "r" (__ptr), "rIL" (val) \
* : "r0","r3","ip","lr","cc","memory" ); \
* __result; })
*/

__kuser_cmpxchg: @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP /* sanity check */
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
#endif

/*
* Theory of operation:
*
* We set the Z flag before loading oldval. If an exception ever
* occurs we cannot be sure the loaded value will still be the same
* when the exception returns; therefore the user exception handler
* will clear the Z flag whenever the interrupted user code was
* actually executing from the kernel address space (see the
* usr_entry macro).
*
* The writeback on the str is used to prevent a race with an
* exception happening just after the str instruction, which would
* otherwise clear the Z flag even though the exchange was done.
*/
teq ip, ip @ set Z flag
ldr ip, [r2] @ load current val
add r3, r2, #1 @ prepare store ptr
teqeq ip, r0 @ compare with oldval if still allowed
streq r1, [r3, #-1]! @ store newval if still allowed
subs r0, r2, r3 @ if r2 == r3 the str occurred
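@ (r3 was r2 + 1; the writeback on a successful str brings it
@ back to r2, so r0 is 0 with C set only if the store happened)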
mov pc, lr

#else
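
@ ARMv6 and above: the exclusive-access instructions do the job.
@ strexeq leaves 0 in r3 only on a successful store, so the final
@ rsbs yields r0 = 0 with C set on success, and a non-zero r0 with
@ C clear otherwise, matching the contract documented above.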

ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
rsbs r0, r3, #0
mov pc, lr

#endif
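
/*
* For illustration only (a sketch, not part of the kernel ABI): the same
* helper driven from plain C, using the __kernel_cmpxchg binding defined
* in the comment above:
*
*	static int atomic_add_return(int i, int *ptr)
*	{
*		int old, new;
*		do {
*			old = *ptr;
*			new = old + i;
*		} while (__kernel_cmpxchg(old, new, ptr) != 0);
*		return new;
*	}
*/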

.align 5

/*
* Reference prototype:
*
* int __kernel_get_tls(void)
*
* Input:
*
* lr = return address
*
* Output:
*
* r0 = TLS value
*
* Clobbered:
*
* the Z flag might be lost
*
* Definition and user space usage example:
*
* typedef int (__kernel_get_tls_t)(void);
* #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
*
* Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
*
* This could be used as follows:
*
* #define __kernel_get_tls() \
* ({ register unsigned int __val asm("r0"); \
* asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
* : "=r" (__val) : : "lr","cc" ); \
* __val; })
*/

__kuser_get_tls: @ 0xffff0fe0

#ifndef CONFIG_HAS_TLS_REG

#ifdef CONFIG_SMP /* sanity check */
#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong"
#endif

ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
mov pc, lr

#else

mrc p15, 0, r0, c13, c0, 3 @ read TLS register
mov pc, lr

#endif

.rep 5
.word 0 @ pad up to __kuser_helper_version
.endr

/*
* Reference declaration:
*
* extern unsigned int __kernel_helper_version;
*
* Definition and user space usage example:
*
* #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
*
* User space may read this to determine the current number of helpers
* available.
*/

__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
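
/*
* A sketch of how user space might gate itself on this word before using
* any helper (the required value of 2 assumes exactly the two helpers
* defined above, cmpxchg and get_tls):
*
*	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
*
*	if (__kernel_helper_version < 2)
*		abort();
*/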

.globl __kuser_helper_end
__kuser_helper_end:


/*
* Vector stubs.
*
58 changes: 50 additions & 8 deletions arch/arm/kernel/traps.c
@@ -450,13 +450,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)

case NR(set_tls):
thread->tp_value = regs->ARM_r0;
#ifdef CONFIG_HAS_TLS_REG
asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
#else
/*
* Our user accessible TLS ptr is located at 0xffff0ffc.
* On SMP read access to this address must raise a fault
* and be emulated from the data abort handler.
* User space must never try to access this directly.
* Expect your app to break eventually if you do so.
* The user helper at 0xffff0fe0 must be used instead.
* (see entry-armv.S for details)
*/
*((unsigned long *)0xffff0ffc) = thread->tp_value;
*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
return 0;

default:
@@ -493,6 +497,41 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
return 0;
}

#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG)

/*
* We might be running on an ARMv6+ processor which should have the TLS
* register, but for some reason we can't use it and have to emulate it.
*/

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
int reg = (instr >> 12) & 15;
if (reg == 15)
return 1;
regs->uregs[reg] = current_thread_info()->tp_value;
regs->ARM_pc += 4;
return 0;
}
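
/*
* Note: 0x0e1d0f70 under mask 0x0fff0fff matches the encoding of
* "mrc p15, 0, rN, c13, c0, 3" for any destination register rN and any
* condition code, i.e. exactly the TLS register read that get_tp_trap()
* above emulates.
*/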

static struct undef_hook arm_mrc_hook = {
.instr_mask = 0x0fff0fff,
.instr_val = 0x0e1d0f70,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
register_undef_hook(&arm_mrc_hook);
return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

void __bad_xchg(volatile void *ptr, int size)
{
printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
@@ -580,14 +619,17 @@ void __init trap_init(void)
{
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;

/*
* Copy the vectors and stubs (in entry-armv.S) into the
* vector page, mapped at 0xffff0000, and ensure these are
* visible to the instruction stream.
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
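/*
* The resulting vector page layout:
*   0xffff0000             exception vectors
*   0xffff0200             vector stubs
*   0xffff1000 - kuser_sz  kuser helpers, with the version word
*                          ending up at 0xffff0ffc
*/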
flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}