Properly handle MALLOC_ALIGNMENT > 2 * SIZE_SZ
H.J. Lu committed May 24, 2012
1 parent 7f90742 · commit b5a2bbe
Showing 2 changed files with 75 additions and 14 deletions.
11 changes: 11 additions & 0 deletions ChangeLog
@@ -1,3 +1,14 @@
2012-05-24  Daniel Jacobowitz  <drow@false.org>
            H.J. Lu  <hongjiu.lu@intel.com>

        [BZ #12495]
        * malloc/malloc.c (SMALLBIN_CORRECTION): New.
        (MIN_LARGE_SIZE, smallbin_index): Use it to handle 16-byte alignment.
        (largebin_index_32_big): New.
        (largebin_index): Use it for 16-byte alignment.
        (sYSMALLOc): Handle MALLOC_ALIGNMENT > 2 * SIZE_SZ.  Don't update
        correction with front_misalign.

2012-05-24  H.J. Lu  <hongjiu.lu@intel.com>

        * sysdeps/unix/sysv/linux/x86_64/x32/nptl/ld.abilist: New file.
78 changes: 64 additions & 14 deletions malloc/malloc.c
@@ -1472,18 +1472,23 @@ typedef struct malloc_chunk* mbinptr;
The bins top out around 1MB because we expect to service large
requests via mmap.
Bin 0 does not exist. Bin 1 is the unordered list; if that would be
a valid chunk size the small bins are bumped up one.
*/

#define NBINS 128
#define NSMALLBINS 64
#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
#define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH)
#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz) \
((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) \
(SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
+ SMALLBIN_CORRECTION)

#define largebin_index_32(sz) \
(((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
@@ -1493,6 +1498,14 @@ typedef struct malloc_chunk* mbinptr;
((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
126)

#define largebin_index_32_big(sz) \
(((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
126)

// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
@@ -1505,7 +1518,9 @@ typedef struct malloc_chunk* mbinptr;
126)

#define largebin_index(sz) \
(SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
(SIZE_SZ == 8 ? largebin_index_64 (sz) \
: MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
: largebin_index_32 (sz))

#define bin_index(sz) \
((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
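
To see what the new SMALLBIN_CORRECTION and largebin_index_32_big definitions do, here is a standalone sketch (not part of the patch) that evaluates the bin-index macros above with assumed x32-style constants MALLOC_ALIGNMENT = 16 and SIZE_SZ = 4, i.e. the MALLOC_ALIGNMENT > 2 * SIZE_SZ case this commit targets; the sample sizes are purely illustrative.

/* Standalone sketch, not part of the patch: the bin-index macros above,
   evaluated with assumed x32-style constants MALLOC_ALIGNMENT = 16 and
   SIZE_SZ = 4, i.e. the MALLOC_ALIGNMENT > 2 * SIZE_SZ case.  */
#include <stdio.h>

#define SIZE_SZ             4U
#define MALLOC_ALIGNMENT    16U
#define NSMALLBINS          64
#define SMALLBIN_WIDTH      MALLOC_ALIGNMENT
#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
#define MIN_LARGE_SIZE      ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz) \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) \
  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
   + SMALLBIN_CORRECTION)

#define largebin_index_32_big(sz) \
  (((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
   ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
   ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
   ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
   ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
   126)

/* With SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, largebin_index() selects
   largebin_index_32_big, so call it directly.  */
#define bin_index(sz) \
  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index_32_big(sz))

int main (void)
{
  unsigned sizes[] = { 16, 32, 992, 1008, 1024 };
  printf ("MIN_LARGE_SIZE = %u\n", (unsigned) MIN_LARGE_SIZE);
  for (unsigned i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    printf ("size %4u -> bin %2lu (%s)\n", sizes[i],
            (unsigned long) bin_index (sizes[i]),
            in_smallbin_range (sizes[i]) ? "small" : "large");
  return 0;
}

Compiled and run, this should print bins 2, 3, 63, 64 and 65 for sizes 16, 32, 992, 1008 and 1024: a minimal 16-byte chunk no longer lands in bin 1 (the unsorted list), and the shrunken small-bin range (MIN_LARGE_SIZE drops from 1024 to 1008) hands off to largebin_index_32_big with no gap in the index space.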
@@ -2273,8 +2288,12 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
is no following chunk whose prev_size field could be used.
See the front_misalign handling below, for glibc there is no
need for further alignments. */
size = (nb + SIZE_SZ + pagemask) & ~pagemask;
need for further alignments unless we have high alignment.
*/
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
  size = (nb + SIZE_SZ + pagemask) & ~pagemask;
else
  size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
tried_mmap = true;

/* Don't try if size wraps around 0 */
@@ -2290,14 +2309,29 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
returned start address to meet alignment requirements here
and in memalign(), and still be able to compute proper
address argument for later munmap in free() and realloc().
*/

For glibc, chunk2mem increases the address by 2*SIZE_SZ and
MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);

p = (mchunkptr)mm;
set_head(p, size|IS_MMAPPED);
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
  {
    /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
       MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
       aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
    assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
    front_misalign = 0;
  }
else
  front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
if (front_misalign > 0) {
  correction = MALLOC_ALIGNMENT - front_misalign;
  p = (mchunkptr)(mm + correction);
  p->prev_size = correction;
  set_head(p, (size - correction) |IS_MMAPPED);
}
else
  {
    p = (mchunkptr)mm;
    set_head(p, size|IS_MMAPPED);
  }

/* update statistics */
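
The mmap path above now pads the request by an extra MALLOC_ALIGN_MASK bytes before rounding to a page and, when chunk2mem of the mapping is not MALLOC_ALIGNMENT-aligned, shifts the chunk forward and records the shift in prev_size. Below is a minimal standalone sketch of that arithmetic, again assuming MALLOC_ALIGNMENT = 16 and SIZE_SZ = 4; the address and request size are made up.

/* Standalone sketch, not glibc code: the mmap-path arithmetic added above,
   assuming MALLOC_ALIGNMENT = 16 and SIZE_SZ = 4.  chunk2mem() and the
   prev_size bookkeeping are modeled on the patch; the address and the
   request size are made up.  */
#include <stdint.h>
#include <stdio.h>

#define SIZE_SZ            4U
#define MALLOC_ALIGNMENT   16U
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define chunk2mem(p)       ((uintptr_t)(p) + 2 * SIZE_SZ)  /* user data offset */

int main (void)
{
  size_t nb = 40000;                 /* padded request size */
  size_t pagemask = 4096 - 1;

  /* Pad by MALLOC_ALIGN_MASK before page-rounding so that skipping up to
     MALLOC_ALIGNMENT - 1 front bytes still leaves room for nb + SIZE_SZ.  */
  size_t size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;

  uintptr_t mm = 0x10000;            /* pretend mmap returned this page */

  /* chunk2mem (mm) is 8-byte but not 16-byte aligned, so shift the chunk.  */
  uintptr_t front_misalign = chunk2mem (mm) & MALLOC_ALIGN_MASK;
  uintptr_t p = mm;
  size_t chunk_size = size;
  if (front_misalign > 0)
    {
      uintptr_t correction = MALLOC_ALIGNMENT - front_misalign;
      p = mm + correction;           /* chunk start moves forward */
      chunk_size = size - correction;
      /* The patch stores 'correction' in p->prev_size so the original
         mapping address can be recovered before munmap.  */
    }

  printf ("size=%#zx mm=%#lx front_misalign=%lu -> p=%#lx chunk_size=%#zx\n",
          size, (unsigned long) mm, (unsigned long) front_misalign,
          (unsigned long) p, chunk_size);
  return 0;
}

When MALLOC_ALIGNMENT == 2 * SIZE_SZ, a page-aligned mm already makes chunk2mem (mm) MALLOC_ALIGN_MASK-aligned, which is why that branch keeps only the old assert and sets front_misalign to 0.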

@@ -2565,8 +2599,24 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)

/* handle non-contiguous cases */
else {
/* MORECORE/mmap must correctly align */
assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
  /* MORECORE/mmap must correctly align */
  assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
else {
  front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0) {

    /*
      Skip over some bytes to arrive at an aligned position.
      We don't need to specially mark these wasted front bytes.
      They will never be accessed anyway because
      prev_inuse of av->top (and any chunk created from its start)
      is always true after initialization.
    */

    aligned_brk += MALLOC_ALIGNMENT - front_misalign;
  }
}

/* Find out current end of memory */
if (snd_brk == (char*)(MORECORE_FAILURE)) {
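For the non-contiguous MORECORE case, the patch only skips the misaligned front bytes by advancing aligned_brk; nothing marks them, because prev_inuse of av->top stays set. A tiny standalone sketch of that adjustment under the same assumed constants (the brk value is made up):

/* Standalone sketch, not glibc code: the non-contiguous MORECORE case above,
   again assuming MALLOC_ALIGNMENT = 16 and SIZE_SZ = 4.  */
#include <stdint.h>
#include <stdio.h>

#define SIZE_SZ            4U
#define MALLOC_ALIGNMENT   16U
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define chunk2mem(p)       ((uintptr_t)(p) + 2 * SIZE_SZ)

int main (void)
{
  uintptr_t brk = 0x20004;      /* pretend MORECORE returned this */
  uintptr_t aligned_brk = brk;

  uintptr_t front_misalign = chunk2mem (brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    /* Just skip the leading bytes; they need no chunk header because
       prev_inuse of av->top (and anything carved from its start) is
       always set, so nothing ever reads a size field in front of it.  */
    aligned_brk += MALLOC_ALIGNMENT - front_misalign;

  printf ("brk=%#lx -> aligned_brk=%#lx (skipped %lu bytes)\n",
          (unsigned long) brk, (unsigned long) aligned_brk,
          (unsigned long) (aligned_brk - brk));
  return 0;
}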
