[PATCH] powerpc vdso updates
This patch cleans up some locking & error handling in the ppc vdso and
moves the vdso base pointer from the thread struct to the mm context
where it more logically belongs. It brings the powerpc implementation
closer to Ingo's new x86 one and also adds an arch_vma_name() function
that allows [vdso] to be printed in /proc/<pid>/maps if Ingo's x86 vdso
patch is also applied.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Benjamin Herrenschmidt authored and Paul Mackerras committed Jun 9, 2006
1 parent 98a90c0 commit a5bba93
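
For context on the arch_vma_name() hook this commit introduces (see the vdso.c hunk below), here is a minimal sketch of how a /proc/<pid>/maps printer could use it. It is not part of the commit; the helper name show_vma_tag() and the seq_file plumbing are illustrative assumptions.

/*
 * Illustrative sketch only, not from this commit: generic code that
 * prints /proc/<pid>/maps entries can ask the architecture for a name
 * for a special mapping and append it to the line.
 */
#include <linux/mm.h>
#include <linux/seq_file.h>

static void show_vma_tag(struct seq_file *m, struct vm_area_struct *vma)
{
	const char *name = arch_vma_name(vma);	/* "[vdso]" for the vDSO VMA */

	if (name)
		seq_printf(m, " %s", name);
}

With both this patch and Ingo's x86-side /proc wiring applied, the process's vDSO mapping then shows up in /proc/<pid>/maps as a line tagged [vdso].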
Showing 7 changed files with 47 additions and 29 deletions.
8 changes: 4 additions & 4 deletions arch/powerpc/kernel/signal_32.c
@@ -757,10 +757,10 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
 
 	/* Save user registers on the stack */
 	frame = &rt_sf->uc.uc_mcontext;
-	if (vdso32_rt_sigtramp && current->thread.vdso_base) {
+	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
 		if (save_user_regs(regs, frame, 0))
 			goto badframe;
-		regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
+		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
 		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
 			goto badframe;
@@ -1029,10 +1029,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 	    || __put_user(sig, &sc->signal))
 		goto badframe;
 
-	if (vdso32_sigtramp && current->thread.vdso_base) {
+	if (vdso32_sigtramp && current->mm->context.vdso_base) {
 		if (save_user_regs(regs, &frame->mctx, 0))
 			goto badframe;
-		regs->link = current->thread.vdso_base + vdso32_sigtramp;
+		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
 	} else {
 		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
 			goto badframe;
4 changes: 2 additions & 2 deletions arch/powerpc/kernel/signal_64.c
@@ -394,8 +394,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
 	current->thread.fpscr.val = 0;
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && current->thread.vdso_base) {
-		regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;
+	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
57 changes: 36 additions & 21 deletions arch/powerpc/kernel/vdso.c
@@ -223,6 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
+	int rc;
 
 #ifdef CONFIG_PPC64
 	if (test_thread_flag(TIF_32BIT)) {
@@ -237,20 +238,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		vdso_base = VDSO32_MBASE;
 #endif
 
-	current->thread.vdso_base = 0;
+	current->mm->context.vdso_base = 0;
 
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
 	if (vdso_pages == 0)
 		return 0;
-
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-	if (vma == NULL)
-		return -ENOMEM;
-
-	memset(vma, 0, sizeof(*vma));
-
 	/* Add a page to the vdso size for the data page */
 	vdso_pages ++;
 
@@ -259,17 +253,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * at vdso_base which is the "natural" base for it, but we might fail
 	 * and end up putting it elsewhere.
 	 */
+	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_pages << PAGE_SHIFT, 0, 0);
-	if (vdso_base & ~PAGE_MASK) {
-		kmem_cache_free(vm_area_cachep, vma);
-		return (int)vdso_base;
+	if (IS_ERR_VALUE(vdso_base)) {
+		rc = vdso_base;
+		goto fail_mmapsem;
 	}
 
-	current->thread.vdso_base = vdso_base;
-
+	/* Allocate a VMA structure and fill it up */
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma == NULL) {
+		rc = -ENOMEM;
+		goto fail_mmapsem;
+	}
 	vma->vm_mm = mm;
-	vma->vm_start = current->thread.vdso_base;
+	vma->vm_start = vdso_base;
 	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);
 
 	/*
@@ -282,23 +282,38 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though
 	 */
-	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
 	vma->vm_flags |= mm->def_flags;
 	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
 	vma->vm_ops = &vdso_vmops;
 
-	down_write(&mm->mmap_sem);
-	if (insert_vm_struct(mm, vma)) {
-		up_write(&mm->mmap_sem);
-		kmem_cache_free(vm_area_cachep, vma);
-		return -ENOMEM;
-	}
+	/* Insert new VMA */
+	rc = insert_vm_struct(mm, vma);
+	if (rc)
+		goto fail_vma;
+
+	/* Put vDSO base into mm struct and account for memory usage */
+	current->mm->context.vdso_base = vdso_base;
 	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	up_write(&mm->mmap_sem);
 
 	return 0;
+
+ fail_vma:
+	kmem_cache_free(vm_area_cachep, vma);
+ fail_mmapsem:
+	up_write(&mm->mmap_sem);
+	return rc;
 }
 
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+		return "[vdso]";
+	return NULL;
+}
+
+
 
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
 				    unsigned long *size)
 {
2 changes: 1 addition & 1 deletion include/asm-powerpc/elf.h
@@ -294,7 +294,7 @@ do { \
 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
-	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base)	\
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base)	\
 } while (0)
 
 /* PowerPC64 relocations defined by the ABIs */
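
The VDSO_AUX_ENT(AT_SYSINFO_EHDR, ...) entry above is how userspace locates the vDSO. A small userspace check, purely for illustration and not part of this patch; it uses glibc's much later getauxval() helper for brevity, whereas contemporary code would walk the auxv vector after envp by hand.

/*
 * Userspace illustration only: print the vDSO base the kernel exports
 * through the AT_SYSINFO_EHDR aux vector entry.
 */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header mapped at 0x%lx\n", vdso);
	else
		printf("no vDSO exported by the kernel\n");
	return 0;
}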
1 change: 1 addition & 0 deletions include/asm-powerpc/mmu.h
@@ -360,6 +360,7 @@ typedef struct {
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
+	unsigned long vdso_base;
 } mm_context_t;
 
 
3 changes: 3 additions & 0 deletions include/asm-powerpc/page.h
@@ -192,6 +192,9 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
 			   struct page *p);
 extern int page_is_ram(unsigned long pfn);
 
+struct vm_area_struct;
+extern const char *arch_vma_name(struct vm_area_struct *vma);
+
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
 
1 change: 0 additions & 1 deletion include/asm-powerpc/processor.h
@@ -153,7 +153,6 @@ struct thread_struct {
 	unsigned long	start_tb;	/* Start purr when proc switched in */
 	unsigned long	accum_tb;	/* Total accumilated purr for process */
 #endif
-	unsigned long	vdso_base;	/* base of the vDSO library */
 	unsigned long	dabr;		/* Data address breakpoint register */
 #ifdef CONFIG_ALTIVEC
 	/* Complete AltiVec register set */
