/* Little security check which won't hurt performance: the
   allocator never wraps around at the end of the address space.
   Therefore we can exclude some size values which might appear
   here by accident or by "design" from some intruder.  We need to
   bypass this check for dumped fake mmap chunks from the old main
   arena because the new malloc may provide additional alignment.  */
if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
     || __builtin_expect (misaligned_chunk (oldp), 0))
    && !DUMPED_MAIN_ARENA_CHUNK (oldp))
  malloc_printerr ("realloc(): invalid pointer");
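/* The first clause above is an overflow-free way of asking whether
   oldp + oldsize would wrap past the end of the address space:
   (uintptr_t) -oldsize is the largest address at which a chunk of
   oldsize bytes could still start without wrapping.  For example, on
   a 32-bit target an oldp of 0xfffff000 with a (corrupted) oldsize of
   0x2000 trips the check (0xfffff000 > 0xffffe000) and the pointer is
   rejected.  */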
checked_request2size (bytes, nb);
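/* nb now holds the internal chunk size for the request: bytes plus
   the size-word overhead, rounded up to the malloc alignment (and to
   at least MINSIZE); checked_request2size rejects requests so large
   that this padding would overflow.  */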
if (chunk_is_mmapped (oldp))
  {
    /* If this is a faked mmapped chunk from the dumped main arena,
       always make a copy (and do not free the old chunk).  */
    if (DUMPED_MAIN_ARENA_CHUNK (oldp))
      {
        /* Must alloc, copy, free.  */
        void *newmem = __libc_malloc (bytes);
        if (newmem == 0)
          return NULL;
        /* Copy as many bytes as are available from the old chunk
           and fit into the new size.  NB: The overhead for faked
           mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
           regular mmapped chunks.  */
        if (bytes > oldsize - SIZE_SZ)
          bytes = oldsize - SIZE_SZ;
        memcpy (newmem, oldmem, bytes);
        return newmem;
      }
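/* Note the asymmetric copy length above: a dumped chunk carries only
   SIZE_SZ of overhead, so its payload is oldsize - SIZE_SZ.  A
   shrinking realloc therefore copies just `bytes', while a growing
   one copies the whole old payload and leaves the tail of the new
   block uninitialized, as realloc semantics allow.  */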
void *newmem;
#if HAVE_MREMAP
    newp = mremap_chunk (oldp, nb);
    if (newp)
      return chunk2mem (newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if (oldsize - SIZE_SZ >= nb)
      return oldmem;			/* do nothing */
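    /* Background on the extra SIZE_SZ: nb assumes a heap chunk can
       borrow the prev_size field of the chunk that follows it for
       user data.  An mmapped chunk has no following chunk, so the
       mapping itself must supply that word; the old mapping can be
       reused unchanged only when oldsize - SIZE_SZ >= nb.  When
       mremap is available, mremap_chunk first tries to resize the
       existing mapping instead of copying.  */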
    /* Must alloc, copy, free.  */
    newmem = __libc_malloc (bytes);
    if (newmem == 0)
      return 0;			/* propagate failure */
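    /* Returning 0 here leaves the old mmapped block untouched and
       still valid, matching the C requirement that a failed realloc
       must not disturb the original allocation.  */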