Update.
2000-12-28  Wolfram Gloger  <wg@malloc.de>

	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
	destination overlap.  Assume dest is always below source if
	overlapping.
Ulrich Drepper committed Dec 31, 2000
1 parent c77a447 commit 09f5e16
Showing 5 changed files with 55 additions and 38 deletions.
ChangeLog: 6 changes (6 additions, 0 deletions)
@@ -1,3 +1,9 @@
+2000-12-28  Wolfram Gloger  <wg@malloc.de>
+
+	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
+	destination overlap.  Assume dest is always below source if
+	overlapping.
+
 2000-12-30  Ulrich Drepper  <drepper@redhat.com>
 
 	* elf/dl-close.c (_dl_close): We can ignore the NODELETE flag if the
elf/dl-open.c: 15 changes (7 additions, 8 deletions)
@@ -269,10 +269,6 @@ dl_open_worker (void *a)
   /* Load that object's dependencies.  */
   _dl_map_object_deps (new, NULL, 0, 0);
 
-  /* Increment the open count for all dependencies.  */
-  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
-    ++new->l_searchlist.r_list[i]->l_opencount;
-
   /* So far, so good.  Now check the versions.  */
   for (i = 0; i < new->l_searchlist.r_nlist; ++i)
     if (new->l_searchlist.r_list[i]->l_versions == NULL)
@@ -321,6 +317,10 @@ dl_open_worker (void *a)
 	  l = l->l_prev;
 	}
 
+  /* Increment the open count for all dependencies.  */
+  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+    ++new->l_searchlist.r_list[i]->l_opencount;
+
   /* Run the initializer functions of new objects.  */
   _dl_init (new, __libc_argc, __libc_argv, __environ);
 
@@ -399,11 +399,10 @@ _dl_open (const char *file, int mode, const void *caller)
     {
       int i;
 
-      /* Increment open counters for all objects which did not get
-	 correctly loaded.  */
+      /* Increment open counters for all objects since this has
+	 not happened yet.  */
       for (i = 0; i < args.map->l_searchlist.r_nlist; ++i)
-	if (args.map->l_searchlist.r_list[i]->l_opencount == 0)
-	  args.map->l_searchlist.r_list[i]->l_opencount = 1;
+	++args.map->l_searchlist.r_list[i]->l_opencount;
 
       _dl_close (args.map);
     }
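The point of the reordering is the failure path: the open counts are now taken only after the version check and relocation have succeeded, so on error _dl_open can bump every count and then let _dl_close decrement them all uniformly, instead of patching up half-initialized counts with the old "== 0 ? set to 1" special case. A minimal sketch of that ordering, with hypothetical names standing in for the glibc internals:

/* Sketch of the reference-count ordering above; hypothetical names,
   not the actual glibc interfaces.  Counts are taken only after every
   fallible step, so one cleanup routine can decrement unconditionally. */
#include <stddef.h>

struct obj { unsigned l_opencount; };

/* Stand-in for the fallible work: mapping dependencies, version
   checks, relocation. */
static int setup (struct obj **deps, size_t n) { (void) deps; (void) n; return 0; }

static void close_all (struct obj **deps, size_t n)  /* cf. _dl_close */
{
  for (size_t i = 0; i < n; ++i)
    --deps[i]->l_opencount;
}

static int open_all (struct obj **deps, size_t n)    /* cf. dl_open_worker */
{
  if (setup (deps, n) != 0)
    {
      /* Error path, as in _dl_open above: take the references the
	 success path never reached, then release them uniformly.  */
      for (size_t i = 0; i < n; ++i)
	++deps[i]->l_opencount;
      close_all (deps, n);
      return -1;
    }
  for (size_t i = 0; i < n; ++i)   /* success: count each dependency */
    ++deps[i]->l_opencount;
  return 0;
}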
linuxthreads/ChangeLog: 5 changes (5 additions, 0 deletions)
@@ -1,3 +1,8 @@
+2000-11-15  Wolfram Gloger  <wg@malloc.de>
+
+	* manager.c (pthread_free): [!FLOATING_STACKS]: Only remap the
+	stack to PROT_NONE, don't unmap it, avoiding collisions with malloc.
+
 2000-12-27  Andreas Jaeger  <aj@suse.de>
 
 	* Examples/ex13.c: Make local functions static.
linuxthreads/manager.c: 31 changes (16 additions, 15 deletions)
@@ -418,7 +418,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,

       new_thread_bottom = (char *) map_addr + guardsize;
       new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
-# else
+# else /* !FLOATING_STACKS */
       if (attr != NULL)
 	{
 	  guardsize = page_roundup (attr->__guardsize, granularity);
@@ -696,23 +696,24 @@ static void pthread_free(pthread_descr th)
 {
   size_t guardsize = th->p_guardsize;
   /* Free the stack and thread descriptor area */
-#ifdef NEED_SEPARATE_REGISTER_STACK
   char *guardaddr = th->p_guardaddr;
-  /* We unmap exactly what we mapped, in case there was something
-     else in the same region.  Guardaddr is always set, eve if
-     guardsize is 0.  This allows us to compute everything else.  */
+  /* Guardaddr is always set, even if guardsize is 0.  This allows
+     us to compute everything else.  */
   size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-  /* Unmap the register stack, which is below guardaddr.  */
-  munmap((caddr_t)(guardaddr-stacksize),
-	 2 * stacksize + th->p_guardsize);
+#ifdef NEED_SEPARATE_REGISTER_STACK
+  /* Take account of the register stack, which is below guardaddr.  */
+  guardaddr -= stacksize;
+  stacksize *= 2;
+#endif
+#if FLOATING_STACKS
+  /* Can unmap safely. */
+  munmap(guardaddr, stacksize + guardsize);
 #else
-  char *guardaddr = th->p_guardaddr;
-  /* We unmap exactly what we mapped, in case there was something
-     else in the same region.  Guardaddr is always set, eve if
-     guardsize is 0.  This allows us to compute everything else.  */
-  size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-
-  munmap (guardaddr, stacksize + guardsize);
+  /* Only remap to PROT_NONE, so that the region is reserved in
+     case we map the stack again later.  Avoid collision with
+     other mmap()s, in particular by malloc().  */
+  mmap(guardaddr, stacksize + guardsize, PROT_NONE,
+       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 #endif
 }
 }
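The !FLOATING_STACKS case matters because these stacks live at fixed addresses derived from the thread descriptor: if the region were truly unmapped, a later mmap() issued by malloc() could land in the hole and then collide when the address is reused for a new stack. Remapping to PROT_NONE keeps the range reserved (and inaccessible) without keeping memory committed. A standalone sketch of the trick, with invented sizes and ordinary Linux mmap flags:

/* Standalone illustration of reserving a stack range with PROT_NONE
   instead of unmapping it.  Sizes and addresses are invented. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
  size_t len = 1 << 20;                 /* a 1 MiB "stack" */

  /* Obtain the region, as pthread_allocate_stack would. */
  char *stack = mmap(NULL, len, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (stack == MAP_FAILED) { perror("mmap"); return 1; }

  /* "Free" it without giving up the address range: accesses now fault,
     no memory stays committed, and the kernel will not place any other
     mapping -- e.g. one issued by malloc() -- inside it. */
  if (mmap(stack, len, PROT_NONE,
	   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    { perror("remap PROT_NONE"); return 1; }

  /* Reclaim the very same range for the next thread's stack. */
  if (mmap(stack, len, PROT_READ | PROT_WRITE,
	   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    { perror("remap for reuse"); return 1; }

  stack[0] = 1;                         /* usable again */
  printf("reserved and reused %zu bytes at %p\n", len, (void *) stack);
  return 0;
}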
malloc/malloc.c: 36 changes (21 additions, 15 deletions)
@@ -423,11 +423,12 @@ Void_t* memmove();
 #endif
 #endif
 
-#if USE_MEMCPY
-
 /* The following macros are only invoked with (2n+1)-multiples of
    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
-   for fast inline execution when n is small. */
+   for fast inline execution when n is small.  If the regions to be
+   copied do overlap, the destination lies always _below_ the source. */
+
+#if USE_MEMCPY
 
 #define MALLOC_ZERO(charp, nbytes) \
 do { \
@@ -446,7 +447,9 @@ do { \
 } else memset((charp), 0, mzsz); \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src.  */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
 do { \
   INTERNAL_SIZE_T mcsz = (nbytes); \
   if(mcsz <= 9*sizeof(mcsz)) { \
@@ -461,12 +464,12 @@ do { \
     *mcdst++ = *mcsrc++; \
     *mcdst++ = *mcsrc++; \
     *mcdst   = *mcsrc  ; \
-  } else memcpy(dest, src, mcsz); \
+  } else if(overlap) \
+    memmove(dest, src, mcsz); \
+  else \
+    memcpy(dest, src, mcsz); \
 } while(0)
 
-#define MALLOC_MEMMOVE(dest,src,nbytes) \
-  memmove(dest, src, mcsz)
-
 #else /* !USE_MEMCPY */
 
 /* Use Duff's device for good zeroing/copying performance. */
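Note that the inline word-by-word path of MALLOC_COPY above was already safe for the direction the new comment promises: it copies from low to high addresses, so with dest below src every word is read before the write that could clobber it. Only the library fallback was wrong, because memcpy has undefined behavior on any overlap; that is what the new overlap flag and memmove fix. A small self-contained demonstration of the directional argument (plain C, not the macro itself):

/* Why an ascending copy is safe when the destination is below the
   source -- the only overlap direction MALLOC_COPY must handle.
   Self-contained demo, not glibc code. */
#include <stdio.h>
#include <string.h>

static void copy_up(long *dst, const long *src, size_t nwords)
{
  /* With dst < src, dst + i < src + i for every i, so src[i] is read
     before any write can overwrite it. */
  for (size_t i = 0; i < nwords; ++i)
    dst[i] = src[i];
}

int main(void)
{
  long buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

  /* Slide words 2..7 down by two: dest (buf) lies below src (buf + 2)
     and the two 6-word regions overlap in the middle. */
  copy_up(buf, buf + 2, 6);
  for (int i = 0; i < 6; ++i)
    printf("%ld ", buf[i]);            /* prints: 2 3 4 5 6 7 */
  printf("\n");

  /* memcpy() is undefined on any overlap; memmove() handles both
     directions, hence the macro's new fallback. */
  long buf2[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  memmove(buf2, buf2 + 2, 6 * sizeof buf2[0]);
  return 0;
}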
@@ -488,7 +491,9 @@ do { \
   } \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src.  */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
 do { \
   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
@@ -3255,7 +3260,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
     /* Must alloc, copy, free. */
     newmem = mALLOc(bytes);
     if (newmem == 0) return 0; /* propagate failure */
-    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
     munmap_chunk(oldp);
     return newmem;
   }
@@ -3370,7 +3375,8 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
 	unlink(prev, bck, fwd);
 	newp = prev;
 	newsize += prevsize + nextsize;
-	MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+	MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize,
+		    1);
 	top(ar_ptr) = chunk_at_offset(newp, nb);
 	set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
 	set_head_size(newp, nb);
@@ -3385,7 +3391,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
 	  unlink(prev, bck, fwd);
 	  newp = prev;
 	  newsize += nextsize + prevsize;
-	  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+	  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
 	  goto split;
 	}
       }
@@ -3396,7 +3402,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
 	unlink(prev, bck, fwd);
 	newp = prev;
 	newsize += prevsize;
-	MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+	MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
 	goto split;
       }
     }
@@ -3436,7 +3442,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
   }
 
   /* Otherwise copy, free, and exit */
-  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 0);
   chunk_free(ar_ptr, oldp);
   return newp;
 }
@@ -4605,7 +4611,7 @@ realloc_check(oldmem, bytes, caller)
     newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
     if (newp) {
       MALLOC_COPY(BOUNDED_N(chunk2mem(newp), nb),
-		  oldmem, oldsize - 2*SIZE_SZ);
+		  oldmem, oldsize - 2*SIZE_SZ, 0);
       munmap_chunk(oldp);
     }
 }
