Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 36324
b: refs/heads/master
c: 19f9a34
h: refs/heads/master
v: v3
  • Loading branch information
Paul Mundt committed Sep 27, 2006
1 parent 00546f7 commit 1efe8e5
Show file tree
Hide file tree
Showing 20 changed files with 474 additions and 18 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 8c12b5dc13bf8516303a8224ab4e9708b33d5b00
refs/heads/master: 19f9a34f87c48bbd270d617d1c986d0c23866a1a
1 change: 1 addition & 0 deletions trunk/arch/sh/kernel/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
io.o io_generic.o sh_ksyms.o syscalls.o

obj-y += cpu/ timers/
obj-$(CONFIG_VSYSCALL) += vsyscall/

obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/sh/kernel/process.c
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
else if (next->thread.ubc_pc && next->mm) {
int asid = 0;
#ifdef CONFIG_MMU
asid |= next->mm->context & MMU_CONTEXT_ASID_MASK;
asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
#endif
ubc_set_tracing(asid, next->thread.ubc_pc);
} else {
Expand Down
17 changes: 14 additions & 3 deletions trunk/arch/sh/kernel/signal.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
*/

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
Expand All @@ -21,6 +20,7 @@
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/elf.h>
#include <linux/personality.h>
#include <linux/binfmts.h>

Expand All @@ -29,8 +29,6 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#undef DEBUG

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
Expand Down Expand Up @@ -312,6 +310,11 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}

/* These symbols are defined with the addresses in the vsyscall page.
See vsyscall-trapa.S. */
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;

static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
Expand Down Expand Up @@ -340,6 +343,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
} else if (likely(current->mm->context.vdso)) {
regs->pr = VDSO_SYM(&__kernel_sigreturn);
#endif
} else {
/* Generate return code (system call to sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
Expand Down Expand Up @@ -416,6 +423,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
} else if (likely(current->mm->context.vdso)) {
regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
} else {
/* Generate return code (system call to rt_sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
Expand Down
36 changes: 36 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Build the SH vDSO ("vsyscall") shared object and the glue objects that
# embed it into, and expose its symbols to, the kernel image.

obj-y += vsyscall.o vsyscall-syscall.o

# vsyscall-syscall.o .incbins the linked DSO, so the shared object must
# exist before that file is assembled.
$(obj)/vsyscall-syscall.o: \
	$(foreach F,trapa,$(obj)/vsyscall-$F.so)

# Teach kbuild about targets
targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
targets += vsyscall-note.o vsyscall.lds

# The DSO images are built using a special linker script
quiet_cmd_syscall = SYSCALL $@
      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
			  -Wl,-T,$(filter-out FORCE,$^) -o $@

# Preprocess the linker-script source: -P/-C keep the output usable as a
# linker script; -Ush presumably undefines a compiler-builtin "sh" macro
# that would otherwise corrupt the script text -- TODO confirm.
export CPPFLAGS_vsyscall.lds += -P -C -Ush

# The DSO is a stripped shared object with the conventional vDSO soname;
# sysv hash style is requested only when the linker supports it.
vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
		 $(call ld-option, -Wl$(comma)--hash-style=sysv)

SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)

# Static pattern rule: link each vsyscall-%.so from its object plus the
# shared linker script, via cmd_syscall above.
$(obj)/vsyscall-trapa.so: \
	$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
	$(call if_changed,syscall)

# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.
extra-y += vsyscall-syms.o
$(obj)/built-in.o: $(obj)/vsyscall-syms.o
$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o

SYSCFLAGS_vsyscall-syms.o = -r
$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
	$(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
	$(call if_changed,syscall)
25 changes: 25 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/vsyscall-note.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
/*
 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
 * Here we can supply some information useful to userland.
 */

#include <linux/uts.h>
#include <linux/version.h>

/*
 * Open an ELF note.  The header is three 32-bit words (name length,
 * descriptor length, type) followed by the NUL-terminated vendor name,
 * padded to 4-byte alignment; the descriptor bytes then follow until
 * ASM_ELF_NOTE_END closes the note.
 */
#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
	.section name, flags; \
	.balign 4; \
	.long 1f - 0f; /* name length */ \
	.long 3f - 2f; /* data length */ \
	.long type; /* note type */ \
0:	.asciz vendor; /* vendor name */ \
1:	.balign 4; \
2:

/* Close the note: pad the descriptor to 4 bytes and restore the section. */
#define ASM_ELF_NOTE_END \
3:	.balign 4; /* pad out section */ \
	.previous

/* Single note advertising the kernel version the vDSO was built with. */
ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
	.long LINUX_VERSION_CODE
ASM_ELF_NOTE_END
39 changes: 39 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/*
 * Signal-return trampolines placed in the vDSO page.  When the vDSO is
 * mapped, signal delivery points the return address (regs->pr) at these
 * instead of generating the equivalent code on the user stack.
 */
#include <asm/unistd.h>

	.text
	.balign 32
	.globl __kernel_sigreturn
	.type __kernel_sigreturn,@function
__kernel_sigreturn:
.LSTART_sigreturn:
	mov.w 1f, r3		/* r3 = __NR_sigreturn (syscall number) */
	trapa #0x10		/* trap into the kernel */
	/* NOTE(review): five no-op "or r0,r0" slots after trapa --
	 * presumably required padding for the SH trap handling path;
	 * confirm against the inline-trampoline code in signal.c. */
	or r0, r0
	or r0, r0
	or r0, r0
	or r0, r0
	or r0, r0

1:	.short __NR_sigreturn	/* literal pool entry loaded by mov.w above */
.LEND_sigreturn:
	.size __kernel_sigreturn,.-.LSTART_sigreturn

	.balign 32
	.globl __kernel_rt_sigreturn
	.type __kernel_rt_sigreturn,@function
__kernel_rt_sigreturn:
.LSTART_rt_sigreturn:
	mov.w 1f, r3		/* r3 = __NR_rt_sigreturn */
	trapa #0x10
	or r0, r0
	or r0, r0
	or r0, r0
	or r0, r0
	or r0, r0

1:	.short __NR_rt_sigreturn
.LEND_rt_sigreturn:
	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn

	/* NOTE(review): switches to .eh_frame and straight back --
	 * presumably ensures the section exists in every object that
	 * includes this file; the including file provides the CFI. */
	.section .eh_frame,"a",@progbits
	.previous
10 changes: 10 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/vsyscall-syscall.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
/*
 * Embed the prelinked vDSO image (vsyscall-trapa.so) into the kernel as
 * init data.  vsyscall_init() copies the bytes between the two bound
 * symbols into the page later mapped into user processes; the embedded
 * copy lives in __INITDATA and is not needed after boot.
 */
#include <linux/init.h>

__INITDATA

	.globl vsyscall_trapa_start, vsyscall_trapa_end
vsyscall_trapa_start:
	.incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
vsyscall_trapa_end:

__FINIT
42 changes: 42 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/vsyscall-trapa.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
/*
 * Main body of the vDSO: a (currently empty) __kernel_vsyscall entry
 * point plus the DWARF unwind info (.eh_frame CIE/FDE) covering it,
 * followed by the signal-return trampolines from vsyscall-sigreturn.S.
 */
	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
__kernel_vsyscall:
.LSTART_vsyscall:
	/* XXX: We'll have to do something here once we opt to use the vDSO
	 * page for something other than the signal trampoline.. as well as
	 * fill out .eh_frame -- PFM. */
.LEND_vsyscall:
	.size __kernel_vsyscall,.-.LSTART_vsyscall
	.previous

	/* Hand-assembled Common Information Entry for the FDE below. */
	.section .eh_frame,"a",@progbits
.LCIE:
	.ualong .LCIE_end - .LCIE_start
.LCIE_start:
	.ualong 0 /* CIE ID */
	.byte 0x1 /* Version number */
	.string "zRS" /* NUL-terminated augmentation string */
	.uleb128 0x1 /* Code alignment factor */
	.sleb128 -4 /* Data alignment factor */
	.byte 0x11 /* Return address register column */
	/* Augmentation length and data (none) */
	.byte 0xc /* DW_CFA_def_cfa */
	.uleb128 0xf /* r15 */
	.uleb128 0x0 /* offset 0 */

	.align 2
.LCIE_end:

	/* Frame Description Entry for the (empty) __kernel_vsyscall range. */
	.ualong .LFDE_end-.LFDE_start /* Length FDE */
.LFDE_start:
	.ualong .LCIE /* CIE pointer */
	.ualong .LSTART_vsyscall-. /* start address */
	.ualong .LEND_vsyscall-.LSTART_vsyscall
	.uleb128 0
	.align 2
.LFDE_end:
	.previous

/* Get the common code for the sigreturn entry points */
#include "vsyscall-sigreturn.S"
150 changes: 150 additions & 0 deletions trunk/arch/sh/kernel/vsyscall/vsyscall.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
*
* Copyright (C) 2006 Paul Mundt
*
* vDSO randomization
* Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

/*
 * Parse the "vdso=" boot parameter; any base is accepted (0x.., 0..).
 * NOTE(review): vdso_enabled is set here and exported but never read
 * within this file -- consumers must be elsewhere; confirm.
 */
static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 1;	/* option consumed */
}
__setup("vdso=", vdso_setup);

/*
* These symbols are defined by vsyscall.o to mark the bounds
* of the ELF DSO images included therein.
*/
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static void *syscall_page;

/*
 * vsyscall_init - allocate and populate the shared vsyscall page.
 *
 * Copies the prelinked vDSO image (embedded between vsyscall_trapa_start
 * and vsyscall_trapa_end by vsyscall-syscall.S) into a freshly zeroed
 * page, which arch_setup_additional_pages() later maps into each process.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails.
 */
int __init vsyscall_init(void)
{
	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	/*
	 * Bail out instead of oopsing in memcpy(): the allocation can
	 * fail, and the original code dereferenced the result unchecked.
	 */
	if (unlikely(!syscall_page))
		return -ENOMEM;

	/*
	 * XXX: Map this page to a fixmap entry if we get around
	 * to adding the page to ELF core dumps
	 */

	memcpy(syscall_page,
	       &vsyscall_trapa_start,
	       &vsyscall_trapa_end - &vsyscall_trapa_start);

	return 0;
}

/*
 * nopage handler for the vsyscall VMA: every faulting address inside the
 * mapping resolves to the corresponding offset within the single shared
 * syscall_page allocated in vsyscall_init().
 */
static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
				       unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *page;

	/*
	 * NOTE(review): vm_end is conventionally exclusive, so the
	 * `address > vma->vm_end` test lets address == vm_end through;
	 * `>=` would be the strictly correct bound -- confirm whether
	 * the fault path can ever pass that value.
	 */
	if (address < vma->vm_start || address > vma->vm_end)
		return NOPAGE_SIGBUS;

	page = virt_to_page(syscall_page + offset);

	/* Extra reference for the mapping being established. */
	get_page(page);

	return page;
}

/*
 * Prevent VMA merging: providing a ->close op keeps the core VM from
 * merging this VMA with adjacent ones, so the vsyscall mapping stays a
 * distinct VMA (the body itself intentionally does nothing).
 */
static void syscall_vma_close(struct vm_area_struct *vma)
{
}

/* Page faults served by syscall_vma_nopage; close only blocks merging. */
static struct vm_operations_struct syscall_vm_ops = {
	.nopage = syscall_vma_nopage,
	.close = syscall_vma_close,
};

/*
 * Setup a VMA at program startup for the vsyscall page.
 *
 * Called at exec time.  Finds a free PAGE_SIZE region, installs a VMA
 * backed by syscall_vm_ops over it, and records the chosen address in
 * mm->context.vdso for later use (signal delivery, arch_vma_name()).
 *
 * Returns 0 on success or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		/* Failure is a -errno encoded in the returned address. */
		ret = addr;
		goto up_fail;
	}

	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		ret = -ENOMEM;
		goto up_fail;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + PAGE_SIZE;
	/* MAYWRITE to allow gdb to COW and set breakpoints */
	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	vma->vm_flags |= mm->def_flags;
	/* Derive page protections from the low three (R/W/X) flag bits. */
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &syscall_vm_ops;
	vma->vm_mm = mm;

	ret = insert_vm_struct(mm, vma);
	if (unlikely(ret)) {
		/* VMA was never published; free it directly. */
		kmem_cache_free(vm_area_cachep, vma);
		goto up_fail;
	}

	/* Record where the page landed; read back by arch_vma_name(). */
	current->mm->context.vdso = (void *)addr;

	mm->total_vm++;
up_fail:	/* shared exit for both success (ret == 0) and failure */
	up_write(&mm->mmap_sem);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";

return NULL;
}

/*
 * No separate gate VMA on SH: the vsyscall page is mapped through a
 * real VMA created in arch_setup_additional_pages(), so there is
 * nothing extra to report.
 */
struct vm_area_struct *get_gate_vma(struct task_struct *task)
{
	return NULL;
}

/* See get_gate_vma(): no gate area exists, so no address is in one. */
int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}

/* Task-less variant of in_gate_area(); likewise always false on SH. */
int in_gate_area_no_task(unsigned long address)
{
	return 0;
}
Loading

0 comments on commit 1efe8e5

Please sign in to comment.