
Commit e6abb69

---
yaml
---
r: 279784
b: refs/heads/master
c: 7dbaa46
h: refs/heads/master
v: v3
Rob Herring authored and Russell King committed Dec 6, 2011
1 parent 55ab82b commit e6abb69
Showing 4 changed files with 172 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: d22759ed5680055fdecbdcf6644dbc8a467ab801
+refs/heads/master: 7dbaa466780a754154531b44c2086f6618cee3a8
1 change: 1 addition & 0 deletions trunk/arch/arm/include/asm/pgtable.h
@@ -336,6 +336,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
2 changes: 2 additions & 0 deletions trunk/arch/arm/include/asm/processor.h
@@ -123,6 +123,8 @@ static inline void prefetch(const void *ptr)

#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_ARM_PROCESSOR_H */
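
Both HAVE_ARCH_* defines added above work the same way: the generic kernel compiles a default implementation only when the architecture has not claimed the hook for itself. A simplified sketch of that gating pattern, loosely based on the 3.2-era mm/util.c (illustration, not part of this commit):

/* Generic fallback, compiled only when the arch does not define
 * HAVE_ARCH_PICK_MMAP_LAYOUT; ARM now provides its own version. */
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
#endif

With the macro defined, the ARM implementation in mmap.c below is linked instead.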
173 changes: 168 additions & 5 deletions trunk/arch/arm/mm/mmap.c
@@ -11,10 +11,49 @@
#include <linux/random.h>
#include <asm/cachetype.h>

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;

        return base - off;
}

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
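
On a VIPT-aliasing cache, two virtual mappings of the same page hit the same cache lines only if their addresses agree modulo SHMLBA, so a shared mapping must be placed such that addr minus the file offset (pgoff << PAGE_SHIFT) is SHMLBA-aligned. A standalone worked example of the same arithmetic, with the hypothetical values SHMLBA = 16KB and PAGE_SHIFT = 12 (illustration only, not part of this commit):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)  /* 16KB, a typical ARM value */

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

int main(void)
{
        unsigned long addr = 0x40001000UL;      /* arbitrary hint */
        unsigned long pgoff = 3;                /* file offset in pages */
        unsigned long a = COLOUR_ALIGN(addr, pgoff);

        /* a - (pgoff << PAGE_SHIFT) is SHMLBA-aligned, so every mapping
         * of this file offset lands on the same cache colour. */
        printf("aligned: %#lx, colour: %lu\n",
               a, (a - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1));
        return 0;       /* prints aligned: 0x40007000, colour: 0 */
}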

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
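
mmap_base() puts the top-down base a stack-sized gap below the top of the user address space, clamping the gap between 128MB and 5/6 of TASK_SIZE, then subtracts the randomization offset. The arithmetic re-run in userspace, assuming TASK_SIZE = 0xC0000000 (the common ARM 3G/1G split) and 4KB pages (illustration, not part of this commit):

#include <stdio.h>

#define TASK_SIZE  0xC0000000UL                 /* assumed 3G/1G split */
#define MIN_GAP    (128*1024*1024UL)
#define MAX_GAP    ((TASK_SIZE)/6*5)
#define PAGE_ALIGN(x) (((x) + 0xFFFUL) & ~0xFFFUL)

static unsigned long mmap_base(unsigned long stack_limit, unsigned long rnd)
{
        unsigned long gap = stack_limit;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
        /* an 8MB stack rlimit clamps up to the 128MB minimum gap */
        printf("%#lx\n", mmap_base(8*1024*1024UL, 0));  /* 0xb8000000 */
        return 0;
}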

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
@@ -68,13 +107,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
-               start_addr = addr = TASK_UNMAPPED_BASE;
+               start_addr = addr = mm->mmap_base;
                mm->cached_hole_size = 0;
        }
-       /* 8 bits of randomness in 20 address space bits */
-       if ((current->flags & PF_RANDOMIZE) &&
-           !(current->personality & ADDR_NO_RANDOMIZE))
-               addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;

full_search:
        if (do_align)
@@ -111,6 +146,134 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        }
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base - len;
        if (do_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start - len;
                if (do_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
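
The loop above is a top-down first fit: start just below the cached free address, and on every collision retry just below the blocking VMA, remembering the largest hole seen, until the request fits or the search runs out of room and drops to the bottom-up path. The same loop shape in a self-contained sketch over a sorted region list, with a hypothetical find_vma() stand-in and colour alignment omitted (illustration, not part of this commit):

#include <stdio.h>

struct vma { unsigned long start, end; };       /* sorted, non-overlapping */

/* Stand-in for find_vma(): first region ending above addr, if any. */
static const struct vma *find_vma(const struct vma *v, int n, unsigned long addr)
{
        for (int i = 0; i < n; i++)
                if (addr < v[i].end)
                        return &v[i];
        return NULL;
}

static unsigned long topdown_fit(const struct vma *v, int n,
                                 unsigned long base, unsigned long len)
{
        unsigned long addr = base - len;
        const struct vma *vma;

        while ((vma = find_vma(v, n, addr)) && addr + len > vma->start) {
                if (vma->start < len)
                        return 0;               /* no room: fall back bottom-up */
                addr = vma->start - len;        /* retry just below the blocker */
        }
        return addr;
}

int main(void)
{
        const struct vma v[] = { { 0xb7000000UL, 0xb8000000UL } };

        /* a 32MB request below base 0xb8000000 lands at 0xb5000000 */
        printf("%#lx\n", topdown_fit(v, 1, 0xb8000000UL, 0x2000000UL));
        return 0;
}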

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
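
The "8 bits of randomness in 20 address space bits" comment means the base moves by 0 to 255 pages, i.e. at most 0xff000 bytes, within a user address space of roughly 2^20 pages. A quick range check, with rand() standing in for the kernel's get_random_int() and PAGE_SHIFT assumed to be 12 (illustration, not part of this commit):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* same computation as above, with a userspace entropy source */
        unsigned long random_factor =
                ((unsigned long)rand() % (1 << 8)) << PAGE_SHIFT;

        printf("max offset: %#lx\n", 255UL << PAGE_SHIFT);      /* 0xff000 */
        printf("sample:     %#lx\n", random_factor);
        return 0;
}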

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
