Commit 055ba41
---
r: 338759
b: refs/heads/master
c: f990247
h: refs/heads/master
i:
  338757: 3a0aa1d
  338755: 3c93dd5
  338751: 2d9f22d
v: v3
Michel Lespinasse authored and Linus Torvalds committed Dec 12, 2012
1 parent 3681034 commit 055ba41
Showing 4 changed files with 34 additions and 127 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: db4fbfb9523c93583c339e66023506f651c1d54b
+refs/heads/master: f99024729e689f5de4534fde5400e3b035f068de
6 changes: 2 additions & 4 deletions trunk/arch/x86/include/asm/elf.h
@@ -354,12 +354,10 @@ static inline int mmap_is_ia32(void)
 	return 0;
 }
 
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
 enum align_flags {
 	ALIGN_VA_32	= BIT(0),
 	ALIGN_VA_64	= BIT(1),
-	ALIGN_VDSO	= BIT(2),
-	ALIGN_TOPDOWN	= BIT(3),
 };
 
 struct va_alignment {
@@ -368,5 +366,5 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
151 changes: 30 additions & 121 deletions trunk/arch/x86/kernel/sys_x86_64.c
@@ -21,37 +21,23 @@
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
  */
-unsigned long align_addr(unsigned long addr, struct file *filp,
-			 enum align_flags flags)
+static unsigned long get_align_mask(void)
 {
-	unsigned long tmp_addr;
-
 	/* handle 32- and 64-bit case with a single conditional */
 	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
-		return addr;
+		return 0;
 
 	if (!(current->flags & PF_RANDOMIZE))
-		return addr;
-
-	if (!((flags & ALIGN_VDSO) || filp))
-		return addr;
-
-	tmp_addr = addr;
-
-	/*
-	 * We need an address which is <= than the original
-	 * one only when in topdown direction.
-	 */
-	if (!(flags & ALIGN_TOPDOWN))
-		tmp_addr += va_align.mask;
+		return 0;
 
-	tmp_addr &= ~va_align.mask;
+	return va_align.mask;
+}
 
-	return tmp_addr;
+unsigned long align_vdso_addr(unsigned long addr)
+{
+	unsigned long align_mask = get_align_mask();
+	return (addr + align_mask) & ~align_mask;
 }
 
 static int __init control_va_addr_alignment(char *str)
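
The conversion splits the old align_addr() in two: get_align_mask() only decides whether alignment applies (returning 0 when it does not), and callers apply the usual power-of-two round-up (addr + mask) & ~mask. A minimal userspace sketch of that arithmetic, with a hypothetical 64 KiB mask standing in for the boot-time va_align.mask:

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical stand-in for va_align.mask; the real value is chosen
	 * at boot on AMD F15h CPUs. It must be one less than a power of two. */
	#define ALIGN_MASK 0xffffUL /* 64 KiB - 1 */

	/* The round-up used by align_vdso_addr(): a mask of 0 is a no-op. */
	static unsigned long round_up_masked(unsigned long addr, unsigned long mask)
	{
		return (addr + mask) & ~mask;
	}

	int main(void)
	{
		unsigned long addr = 0x12345000UL;
		unsigned long aligned = round_up_masked(addr, ALIGN_MASK);

		printf("%#lx -> %#lx\n", addr, aligned);	/* 0x12345000 -> 0x12350000 */
		assert((aligned & ALIGN_MASK) == 0);		/* lands on a boundary */
		assert(aligned - addr <= ALIGN_MASK);		/* moves by less than mask+1 */
		return 0;
	}

Because a mask of 0 makes the round-up a no-op, align_vdso_addr() needs no separate "alignment disabled" path.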
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -144,58 +130,25 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-	    && len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = begin;
-	}
-	addr = mm->free_area_cache;
-	if (addr < begin)
-		addr = begin;
-	start_addr = addr;
-
-full_search:
-
-	addr = align_addr(addr, filp, 0);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (end - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != begin) {
-				start_addr = addr = begin;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		addr = align_addr(addr, filp, 0);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = begin;
+	info.high_limit = end;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr;
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
 	/*
@@ -270,14 +188,5 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
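
Both paths now fill a struct vm_unmapped_area_info and delegate the gap search to vm_unmapped_area(). As a rough userspace model of what the bottom-up case (info.flags == 0) computes, here is a sketch over a sorted array of existing mappings; the struct, names, and return convention are illustrative rather than kernel API, and align_offset is taken as 0 as in this commit:

	#include <stdio.h>

	/* An existing mapping [start, end); stands in for a VMA. */
	struct range { unsigned long start, end; };

	/*
	 * Model of a bottom-up vm_unmapped_area() search: return the lowest
	 * mask-aligned addr in [low, high) such that [addr, addr + len)
	 * overlaps none of the n sorted, non-overlapping ranges.
	 * Returns 0 on failure where the kernel would return -ENOMEM.
	 */
	static unsigned long find_gap(const struct range *r, int n, unsigned long len,
				      unsigned long low, unsigned long high,
				      unsigned long mask)
	{
		unsigned long addr = (low + mask) & ~mask;	/* info.align_mask */

		for (int i = 0; i <= n; i++) {
			/* free space ends at the next mapping, or at high */
			unsigned long limit = (i < n && r[i].start < high) ? r[i].start : high;

			if (addr + len <= limit)
				return addr;			/* fits in this gap */
			if (i < n && r[i].end > addr)		/* skip past this mapping */
				addr = (r[i].end + mask) & ~mask;
		}
		return 0;
	}

	int main(void)
	{
		struct range maps[] = { { 0x10000, 0x30000 }, { 0x50000, 0x60000 } };

		/* 0x8000 bytes, 64 KiB aligned: expect the hole at 0x30000 */
		printf("%#lx\n", find_gap(maps, 2, 0x8000, 0x10000, 0x100000, 0xffff));
		return 0;
	}

The topdown variant (VM_UNMAPPED_AREA_TOPDOWN) is the mirror image: it scans from high_limit downward and returns the highest aligned fit, which is why the caller above checks the page-aligned result and falls back to bottomup only on -ENOMEM.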
2 changes: 1 addition & 1 deletion trunk/arch/x86/vdso/vma.c
@@ -141,7 +141,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	 * unaligned here as a result of stack start randomization.
 	 */
 	addr = PAGE_ALIGN(addr);
-	addr = align_addr(addr, NULL, ALIGN_VDSO);
+	addr = align_vdso_addr(addr);
 
 	return addr;
 }
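
In vdso_addr() the two-step PAGE_ALIGN() + align_vdso_addr() sequence stays safe because the alignment mask, when active, is one less than a power of two at least as large as a page, so rounding up with the larger mask preserves page alignment. A quick check of that property, under the same hypothetical 64 KiB mask as above:

	#include <assert.h>

	#define PAGE_SIZE  4096UL
	#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define ALIGN_MASK 0xffffUL	/* hypothetical: 2^n - 1, >= PAGE_SIZE - 1 */

	int main(void)
	{
		unsigned long addr = PAGE_ALIGN(0x12345678UL);
		unsigned long vdso = (addr + ALIGN_MASK) & ~ALIGN_MASK; /* align_vdso_addr() */

		assert((vdso & (PAGE_SIZE - 1)) == 0);	/* still page aligned */
		assert(vdso - addr <= ALIGN_MASK);	/* bounded displacement */
		return 0;
	}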
