MIPS: Move arch_get_unmapped_area and gang to new file.
It never really belonged in syscall.c, and it's about to become considerably more complex.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Ralf Baechle committed May 19, 2011
1 parent 9c1e8a9 commit 6f6c3c3
Showing 3 changed files with 124 additions and 114 deletions.
113 changes: 0 additions & 113 deletions arch/mips/kernel/syscall.c
@@ -10,12 +10,9 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
@@ -25,11 +22,9 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>

#include <asm/asm.h>
@@ -66,114 +61,6 @@ asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
        return res;
}

unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr,pgoff) \
        ((((addr) + shm_align_mask) & ~shm_align_mask) + \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct * vmm;
        int do_color_align;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }
        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long,
        fd, off_t, offset)
3 changes: 2 additions & 1 deletion arch/mips/mm/Makefile
@@ -3,7 +3,8 @@
#

obj-y += cache.o dma-default.o extable.o fault.o \
         init.o tlbex.o tlbex-fault.o uasm.o page.o
         init.o mmap.o tlbex.o tlbex-fault.o uasm.o \
         page.o

obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
122 changes: 122 additions & 0 deletions arch/mips/mm/mmap.c
@@ -0,0 +1,122 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr,pgoff) \
        ((((addr) + shm_align_mask) & ~shm_align_mask) + \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct * vmm;
        int do_color_align;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }
        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}
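
The COLOUR_ALIGN() arithmetic is the heart of the new file, so the following self-contained user-space sketch reproduces it outside the kernel. It is illustrative only and not part of the commit: PAGE_SHIFT, SHM_ALIGN_MASK and the sample address and page offset are assumed example values (4 KiB pages, a 16 KiB aliasing window), chosen just to make the rounding visible.

/*
 * Illustrative sketch, not part of the commit: re-implements the
 * COLOUR_ALIGN() expression from arch/mips/mm/mmap.c with assumed
 * constants so its rounding can be checked in user space.
 */
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assumed: 4 KiB pages */
#define SHM_ALIGN_MASK  ((1UL << 14) - 1)       /* assumed: 16 KiB alias window */

/* Round addr up to the next aliasing boundary, then add the cache colour
 * implied by the file offset (pgoff), as in the kernel macro above. */
#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +        \
         (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))

int main(void)
{
        unsigned long addr  = 0x2000d000UL;     /* hypothetical mmap() hint */
        unsigned long pgoff = 3;                /* hypothetical file offset in pages */

        /* 0x2000d000 rounds up to 0x20010000; the colour of page offset 3 is
         * (3 << 12) & 0x3fff = 0x3000, so the result is 0x20013000. */
        printf("colour-aligned address: 0x%lx\n", COLOUR_ALIGN(addr, pgoff));
        return 0;
}

With the low bits of the mapping address made to match those of the file offset, every mapping of a given file page falls on the same lines of a virtually indexed data cache, which is the aliasing constraint the MAP_SHARED check in arch_get_unmapped_area() guards against.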
