/* GCC can always grok prototypes.  For C++ programs we add throw()
   to help it optimize the function calls.  But this works only with
   gcc 2.8.x and egcs.  For gcc 3.2 and up we even mark C functions
   as non-throwing using a function attribute since programs can use
   the -fexceptions options for C code as well.  */
# if !defined __cplusplus && __GNUC_PREREQ (3, 3)
#  define __THROW      __attribute__ ((__nothrow__ __LEAF))
#  define __THROWNL    __attribute__ ((__nothrow__))
#  define __NTH(fct)   __attribute__ ((__nothrow__ __LEAF)) fct
# else
#  if defined __cplusplus && __GNUC_PREREQ (2,8)
#   define __THROW     throw ()
#   define __THROWNL   throw ()
#   define __NTH(fct)  __LEAF_ATTR fct throw ()
#  else
#   define __THROW
#   define __THROWNL
#   define __NTH(fct)  fct
#  endif
# endif
In a C++ build, this marks the function as one that never throws, so the compiler does not need to emit exception-handling code around calls to it, which improves the generated code.
In plain C the throw () form is not used: with older compilers __THROW expands to nothing, while gcc 3.3 and later also mark C functions as non-throwing via the __nothrow__ attribute, since C code may be compiled with -fexceptions as well.
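For illustration, this is roughly how the macros show up in a declaration; the line below is modeled on glibc's stdlib.h prototype of malloc (the exact set of attributes varies between glibc versions):

/* Sketch of how __THROW and friends are used in a header (modeled on stdlib.h).  */
extern void *malloc (size_t __size) __THROW __attribute_malloc__ __wur;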
__attribute_malloc__: optimization hint for malloc-style functions
This symbol is likewise defined in misc/sys/cdefs.h:
/* At some point during the gcc 2.96 development the `malloc' attribute
   for functions was introduced.  We don't want to use it unconditionally
   (although this would be possible) since it generates warnings.  */
#if __GNUC_PREREQ (2,96)
# define __attribute_malloc__ __attribute__ ((__malloc__))
#else
# define __attribute_malloc__ /* Ignore */
#endif
For gcc 2.96 and later, this expands to the compiler's __attribute__ ((__malloc__)) directive.
The GCC manual describes the attribute as follows:
malloc
Using this attribute can improve optimization. Compiler predicts that a function with the attribute returns non-null in most cases. Functions like malloc and calloc have this property because they return a pointer to uninitialized or zeroed-out storage. However, functions like realloc do not have this property, as they can return a pointer to storage containing pointers.
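As a quick user-level sketch (not glibc code; the allocator name below is made up), the attribute can be attached to any allocator-like function:

/* Hypothetical allocator marked with the malloc attribute: callers may
   assume the returned pointer does not alias any existing pointer.  */
#include <stdlib.h>

void *pool_alloc (size_t n) __attribute__ ((__malloc__));

void *
pool_alloc (size_t n)
{
  /* Returns fresh, zeroed storage, so the attribute's contract holds.  */
  return calloc (1, n);
}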
/* Convenience macros to test the versions of glibc and gcc.
   Use them like this:
   #if __GNUC_PREREQ (2,8)
   ... code requiring gcc 2.8 or later ...
   #endif
   Note - they won't work for gcc1 or glibc1, since the _MINOR macros
   were not defined then.  */
#if defined __GNUC__ && defined __GNUC_MINOR__
# define __GNUC_PREREQ(maj, min) \
        ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
#else
# define __GNUC_PREREQ(maj, min) 0
#endif
Roughly speaking, this just checks the gcc version, so I won't go into further detail here.
The name simply stands for "gcc prerequisite", i.e. a minimum-version requirement check.
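As a worked example of the encoding: with gcc 5.4, (__GNUC__ << 16) + __GNUC_MINOR__ = (5 << 16) + 4 = 0x50004, while __GNUC_PREREQ (2,96) compares against (2 << 16) + 96 = 0x20060, so the check evaluates to true.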
__wur: warn when a function's return value is ignored
This is yet another macro defined in misc/sys/cdefs.h; the code is as follows:
/* If fortification mode, we warn about unused results of certain
   function calls which can lead to problems.  */
#if __GNUC_PREREQ (3,4)
# define __attribute_warn_unused_result__ \
   __attribute__ ((__warn_unused_result__))
# if __USE_FORTIFY_LEVEL > 0
#  define __wur __attribute_warn_unused_result__
# endif
#else
# define __attribute_warn_unused_result__ /* empty */
#endif
#ifndef __wur
# define __wur /* Ignore */
#endif
The warn_unused_result attribute causes a warning to be emitted if a caller of the function with this attribute does not use its return value. This is useful for functions where not checking the result is either a security problem or always a bug, such as realloc.
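A minimal sketch outside glibc (the function names are made up) of what the attribute does when a caller drops the return value:

/* Compile with warnings enabled; the first call below is flagged because of
   warn_unused_result, the second is fine because the result is used.  */
#include <stdio.h>

int must_check (void) __attribute__ ((__warn_unused_result__));

int
must_check (void)
{
  return 42;
}

int
main (void)
{
  must_check ();                      /* warning: ignoring return value */
  printf ("%d\n", must_check ());     /* OK: result is used */
  return 0;
}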
/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available. Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minumum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail. The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void *__libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)
Hidden visibility indicates that the entity declared has a new form of linkage, which we call “hidden linkage”. Two declarations of an object with hidden linkage refer to the same object if they are in the same shared object.
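A small sketch (not glibc code; the names are hypothetical) of what hidden visibility means in practice with GCC on ELF targets:

/* internal_helper is not exported from the shared object; the call from
   exported_api binds locally and cannot be interposed from outside.  */
int internal_helper (void) __attribute__ ((visibility ("hidden")));

int
internal_helper (void)
{
  return 1;
}

int
exported_api (void)        /* default visibility: exported */
{
  return internal_helper ();
}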
  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);
#ifndef weak_variable
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
# define weak_variable weak_function
#endif
This is another layer of nested definitions; following it, the definition can be found in include/libc-symbols.h:
/* This comes between the return type and function name in a
   function definition to make that definition weak.  */
# define weak_function __attribute__ ((weak))
# define weak_const_function __attribute__ ((weak, __const__))
So this, too, is just a gcc attribute; the manual describes it as follows:
weak
The weak attribute causes a declaration of an external symbol to be emitted as a weak symbol rather than a global. This is primarily useful in defining library functions that can be overridden in user code, though it can also be used with non-function declarations. The overriding symbol must have the same type as the weak symbol. In addition, if it designates a variable it must also have the same size and alignment as the weak symbol. Weak symbols are supported for ELF targets, and also for a.out targets when using the GNU assembler and linker.
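A minimal sketch of a weak definition (the names are made up; this mirrors the spirit of glibc's weak hook variables, not their actual code):

#include <stdio.h>

/* Weak default implementation; a strong definition in user code, if one
   exists at link time, silently replaces it.  */
__attribute__ ((weak)) void
report (const char *msg)
{
  printf ("default: %s\n", msg);
}

int
main (void)
{
  report ("hello");   /* prints the default unless overridden elsewhere */
  return 0;
}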
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;
  while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
    {
      size_t len = strcspn (envline, "=");

      if (envline[len] != '=')
        /* This is a "MALLOC_" variable at the end of the string
           without a '=' character.  Ignore it since otherwise we
           will access invalid memory below.  */
        continue;
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;
# else
/* For assembly, we need to do the opposite of what we do in C:
   in assembly gcc __REDIRECT stuff is not in place, so functions
   are defined by its normal name and we need to create the
   __GI_* alias to it, in C __REDIRECT causes the function
   definition to use __GI_* name and we need to add alias to the
   real name.
   There is no reason to use hidden_weak over hidden_def in assembly,
   but we provide it for consistency with the C usage.
   hidden_proto doesn't make sense for assembly but the equivalent
   is to call via the HIDDEN_JUMPTARGET macro instead of JUMPTARGET.  */
#  define hidden_def(name)   strong_alias (name, __GI_##name)
...
#else
# ifndef __ASSEMBLER__
#  define hidden_proto(name, attrs...)
#  define hidden_tls_proto(name, attrs...)
# else
#  define HIDDEN_JUMPTARGET(name) JUMPTARGET(name)
# endif /* Not __ASSEMBLER__ */
# define hidden_weak(name)
# define hidden_def(name)
...
#endif
malloc_consolidate is a specialized version of free() that tears down chunks held in fastbins. Free itself cannot be used for this purpose since, among other things, it might place chunks back onto fastbins. So, instead, we need to use a minor variant of the same code.
Also, because this routine needs to be called the first time through malloc anyway, it turns out to be the perfect place to trigger initialization code. */
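Before reading the code, it helps to recall the fastbin helpers it relies on. Roughly, in glibc 2.23-era malloc.c they look like this (a sketch; exact definitions vary by version):

/* Each arena keeps an array of singly linked fastbin heads.  */
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)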
static void malloc_consolidate(mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;
  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */

  if (get_max_fast () != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);
    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */

    maxfb = &fastbin (av, NFASTBINS - 1);
    fb = &fastbin (av, 0);
    do {
      p = atomic_exchange_acq (fb, 0);
      if (p != 0) {
        do {
          check_inuse_chunk(av, p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(av, p, bck, fwd);
          }

          if (nextchunk != av->top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

  const char *errstr = NULL;
/* Convert request size to internal form by adding SIZE_SZ bytes overhead plus possibly more to obtain necessary alignment and/or to obtain a size of at least MINSIZE, the smallest allocatable size. Also, checked_request2size traps (returning 0) request sizes that are so large that they wrap around zero when padded and aligned. */
checked_request2size (bytes, nb);
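For reference, the conversion performed here boils down to the request2size macro; below is a sketch of the glibc 2.23-era definition (checked_request2size additionally rejects requests so large that the padded size would wrap around):

/* Pad the request with SIZE_SZ bytes of overhead, round up to a multiple of
   MALLOC_ALIGNMENT, and never go below MINSIZE.  */
#define request2size(req)                                   \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?       \
   MINSIZE :                                                \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

For example, on 64-bit (SIZE_SZ = 8, MALLOC_ALIGN_MASK = 0xf) a request of 24 bytes becomes (24 + 8 + 15) & ~15 = 32.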
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }
/* If the size qualifies as a fastbin, first check corresponding bin. This code is safe to execute even if av is not yet initialized, so we can try it without checking, which saves some time on this fast path. */
/* If a small request, check regular bin. Since these "smallbins" hold one size each, no searching within bins is necessary. (For a large request, we need to wait until unsorted chunks are processed to find best fit. But for small ones, fits are exact anyway, so we can check now, which is faster.) */
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);
          else
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);
              bin->bk = bck;
              bck->fd = bin;
/* If this is a large request, consolidate fastbins before continuing. While it might look excessive to kill all fastbins before even seeing if there is space available, this avoids fragmentation problems normally associated with fastbins. Also, in practice, programs tend to have runs of either small or large requests, but less often mixtures, so consolidation is not invoked all that often in most programs. And the programs that it is called frequently in otherwise tend to fragment. */
/* Process recently freed or remaindered chunks, taking one only if it is exact fit, or, if this a small request, the chunk is remainder from the most recent non-exact fit. Place other traversed chunks in bins. Note that this step is the only place in any routine where chunks are placed in bins.
The outer loop here is needed because we might not realize until near the end of malloc that we should have consolidated, so must do so and retry. This happens at most once, and only when we would otherwise need to expand memory to service a "small" request. */
/* If a small request, try to use last remainder if it is the only chunk in unsorted bin. This helps promote locality for runs of consecutive small requests. This is the only exception to best-fit, and applies only when there is no exact fit for a small chunk. */
              /* maintain large bins in sorted order */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
                  if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
                    {
                      fwd = bck;
                      bck = bck->bk;
#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }
/* If a large request, scan through the chunks of current bin in sorted order to find smallest that fits. Use the skip list for this. */
  if (!in_smallbin_range (nb))
    {
      bin = bin_at (av, idx);

      /* skip scan if empty or largest chunk is too small */
      if ((victim = first (bin)) != bin &&
          (unsigned long) (victim->size) >= (unsigned long) (nb))
        {
          victim = victim->bk_nextsize;
          while (((unsigned long) (size = chunksize (victim)) <
                  (unsigned long) (nb)))
            victim = victim->bk_nextsize;

          /* Avoid removing the first entry for a size so that the skip
             list does not have to be rerouted.  */
          if (victim != last (bin) && victim->size == victim->fd->size)
            victim = victim->fd;
/* Search for a chunk by scanning bins, starting with next largest bin. This search is strictly by best-fit; i.e., the smallest (with ties going to approximately the least recently used) chunk that fits is selected.
The bitmap avoids needing to check that most blocks are nonempty. The particular case of skipping all bins during warm-up phases when no chunks have been returned yet is faster than it might look. */
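For context, the binmap helpers referenced in this search look roughly like this in glibc 2.23-era malloc.c (a sketch; each word of av->binmap covers 32 bins, and a set bit means "possibly non-empty"):

#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((((unsigned int) (1)) << ((i) & ((1U << BINMAPSHIFT) - 1))))

#define mark_bin(m, i)   ((m)->binmap[idx2block (i)] |= idx2bit (i))
#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))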
      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);

      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }

          /* Advance to bin with set bit. There must be one. */
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }

          /* Inspect the bin. It is likely to be non-empty */
          victim = last (bin);

          /* If a false alarm (empty bin), clear the bit. */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }

          else
            {
              size = chunksize (victim);

              /* We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink (av, victim, bck, fwd);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    victim->size |= NON_MAIN_ARENA;
                }
use_top: /* If large enough, split off the chunk bordering the end of memory (held in av->top). Note that this is in accord with the best-fit search rule. In effect, av->top is treated as larger (and thus less well fitting) than any other available chunk since it can be extended to be as large as necessary (up to system limitations).
We require that av->top always exists (i.e., has size >= MINSIZE) after initialization, so if it would otherwise be exhausted by current request, it is replenished. (The main reason for ensuring it exists is that we may need MINSIZE space to put in fenceposts in sysmalloc.) */
  /*
     When we are using atomic ops to free fast chunks we can get
     here for all block sizes.
   */
  else if (have_fastchunks (av))
    {
      malloc_consolidate (av);
      /* restore original bin index */
      if (in_smallbin_range (nb))
        idx = smallbin_index (nb);
      else
        idx = largebin_index (nb);
    }
/* ----------- Routines dealing with system allocation -------------- */
/* sysmalloc handles malloc cases requiring more memory from the system. On entry, it is assumed that av->top does not have enough space to service request for nb bytes, thus requiring that av->top be extended or replaced. */
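MORECORE here normally expands to glibc's __default_morecore, a thin wrapper around sbrk; roughly (a sketch based on glibc's morecore.c):

/* Default MORECORE: grow the break by `increment' bytes, or report failure.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}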
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  mchunkptr old_top;              /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char *old_end;                  /* its end address */

  long size;                      /* arg to first MORECORE or mmap call */
  char *brk;                      /* return value from MORECORE */

  long correction;                /* arg to 2nd MORECORE call */
  char *snd_brk;                  /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char *aligned_brk;              /* aligned offset into brk */

  mchunkptr p;                    /* the allocated/returned chunk */
  mchunkptr remainder;            /* remainder from allocation */
  unsigned long remainder_size;   /* its size */
/* If have mmap, and the request size meets the mmap threshold, and the system supports mmap, and there are few enough currently allocated mmapped regions, try to directly map this request rather than expanding top. */
  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      char *mm;           /* return value from mmap call*/
    try_mmap:
      /* Round up size to nearest page.  For mmapped chunks, the overhead
         is one SIZE_SZ unit larger than for normal chunks, because there
         is no following chunk whose prev_size field could be used.

         See the front_misalign handling below, for glibc there is no
         need for further alignments unless we have have high alignment.
       */
      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
      else
        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
      tried_mmap = true;
      /* Don't try if size wraps around 0 */
      if ((unsigned long) (size) > (unsigned long) (nb))
        {
          mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));

          if (mm != MAP_FAILED)
            {
              /* The offset to the start of the mmapped region is stored
                 in the prev_size field of the chunk. This allows us to adjust
                 returned start address to meet alignment requirements here
                 and in memalign(), and still be able to compute proper
                 address argument for later munmap in free() and realloc(). */

              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                {
                  /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
                     MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
                     aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
                  front_misalign = 0;
                }
              else
                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  correction = MALLOC_ALIGNMENT - front_misalign;
                  p = (mchunkptr) (mm + correction);
                  p->prev_size = correction;
                  set_head (p, (size - correction) | IS_MMAPPED);
                }
              else
                {
                  p = (mchunkptr) mm;
                  set_head (p, size | IS_MMAPPED);
                }
              /* update statistics */

              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
              atomic_max (&mp_.max_n_mmaps, new);
  /* Precondition: not enough current space to satisfy nb request */
  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
  if (av != &main_arena)
    {
      heap_info *old_heap, *heap;
      size_t old_heap_size;

      /* First try to extend the current heap. */
      old_heap = heap_for_ptr (old_top);
      old_heap_size = old_heap->size;
      if ((long) (MINSIZE + nb - old_size) > 0
          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
        {
          av->system_mem += old_heap->size - old_heap_size;
          arena_mem += old_heap->size - old_heap_size;
          set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
                    | PREV_INUSE);
        }
      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
        {
          /* Use a newly allocated heap.  */
          heap->ar_ptr = av;
          heap->prev = old_heap;
          av->system_mem += heap->size;
          arena_mem += heap->size;
          /* Set up the new top.  */
          top (av) = chunk_at_offset (heap, sizeof (*heap));
          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);

          /* Setup fencepost and free the old top chunk with a multiple of
             MALLOC_ALIGNMENT in size. */
          /* The fencepost takes at least MINSIZE bytes, because it might
             become the top chunk again later.  Note that a footer is set
             up, too, although the chunk is marked in use. */
          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
          if (old_size >= MINSIZE)
            {
              set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
              _int_free (av, old_top, 1);
            }
          else
            {
              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
              set_foot (old_top, (old_size + 2 * SIZE_SZ));
            }
        }
      else if (!tried_mmap)
        /* We can at least try to use to mmap memory.  */
        goto try_mmap;
    }
  else     /* av == main_arena */
    { /* Request enough space for nb + pad + overhead */
      size = nb + mp_.top_pad + MINSIZE;
/* If contiguous, we can subtract out existing space that we hope to combine with new space. We add it back later only if we don't actually get contiguous space. */
if (contiguous (av)) size -= old_size;
/* Round to a multiple of page size. If MORECORE is not contiguous, this ensures that we only call it with whole-page arguments. And if MORECORE is contiguous and this is not first time through, this preserves page-alignment of previous calls. Otherwise, we correct to page-align below. */
size = ALIGN_UP (size, pagesize);
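ALIGN_UP here is glibc's generic rounding helper; a sketch of its definition, as found in include/libc-internal.h (exact location may vary by version):

/* Round base up/down to a multiple of size (size must be a power of two).  */
#define ALIGN_DOWN(base, size)  ((base) & -((__typeof__ (base)) (size)))
#define ALIGN_UP(base, size)    ALIGN_DOWN ((base) + (size) - 1, (size))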
/* Don't try to call MORECORE if argument is so big as to appear negative. Note that since mmap takes size_t arg, it may succeed below even if we cannot call MORECORE. */
      if (brk != (char *) (MORECORE_FAILURE))
        {
          /* Call the `morecore' hook if necessary.  */
          void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
          if (__builtin_expect (hook != NULL, 0))
            (*hook)();
        }
      else
        {
          /* If have mmap, try using it as a backup when MORECORE fails or
             cannot be used. This is worth doing on systems that have "holes" in
             address space, so sbrk cannot extend to give contiguous space, but
             space is available elsewhere.  Note that we ignore mmap max count
             and threshold limits, since the space will not be used as a
             segregated mmap region.
           */
          /* Cannot merge with old top, so add its size back in */
          if (contiguous (av))
            size = ALIGN_UP (size + old_size, pagesize);

          /* If we are relying on mmap as backup, then use larger units */
          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
            size = MMAP_AS_MORECORE_SIZE;

          /* Don't try if size wraps around 0 */
          if ((unsigned long) (size) > (unsigned long) (nb))
            {
              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));

              if (mbrk != MAP_FAILED)
                {
                  /* We do not need, and cannot use, another sbrk call to find end */
                  brk = mbrk;
                  snd_brk = brk + size;

                  /* Record that we no longer have a contiguous sbrk region.
                     After the first time mmap is used as backup, we do not
                     ever rely on contiguous space since this could incorrectly
                     bridge regions.  */
                  set_noncontiguous (av);
                }
            }
        }

      if (brk != (char *) (MORECORE_FAILURE))
        {
          if (mp_.sbrk_base == 0)
            mp_.sbrk_base = brk;
          av->system_mem += size;
/* If MORECORE extends previous space, we can likewise extend top size. */
* If the first time through or noncontiguous, we need to call sbrk just to find out where the end of memory lies.
* We need to ensure that all returned chunks from malloc will meet MALLOC_ALIGNMENT
* If there was an intervening foreign sbrk, we need to adjust sbrk request size to account for fact that we will not be able to combine new space with existing space in old_top.
* Almost all systems internally allocate whole pages at a time, in which case we might as well use the whole last page of request. So we allocate enough more memory to hit a page boundary now, which in turn causes future contiguous calls to page-align. */
          /* handle contiguous cases */
          if (contiguous (av))
            {
              /* Count foreign sbrk as system_mem.  */
              if (old_size)
                av->system_mem += brk - old_end;

              /* Guarantee alignment of first new chunk made from this space */

              front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  /* Skip over some bytes to arrive at an aligned position.
                     We don't need to specially mark these wasted front bytes.
                     They will never be accessed anyway because
                     prev_inuse of av->top (and any chunk created from its start)
                     is always true after initialization.  */
/* If can't allocate correction, try to at least find out current brk. It might be enough to proceed without failing.
Note that if second sbrk did NOT fail, we assume that space is contiguous with first sbrk. This is a safe assumption unless program is multithreaded but doesn't use locks and a foreign sbrk occurred between our first and second calls. */
          /* handle non-contiguous cases */
          else
            {
              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                /* MORECORE/mmap must correctly align */
                assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
              else
                {
                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                  if (front_misalign > 0)
                    {
                      /* Skip over some bytes to arrive at an aligned position.
                         We don't need to specially mark these wasted front bytes.
                         They will never be accessed anyway because
                         prev_inuse of av->top (and any chunk created from its start)
                         is always true after initialization.  */
              /* Find out current end of memory */
              if (snd_brk == (char *) (MORECORE_FAILURE))
                {
                  snd_brk = (char *) (MORECORE (0));
                }
            }

          /* Adjust top based on results of second sbrk */
          if (snd_brk != (char *) (MORECORE_FAILURE))
            {
              av->top = (mchunkptr) aligned_brk;
              set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
              av->system_mem += correction;
/* If not the first time through, we either have a gap due to foreign sbrk or a non-contiguous region. Insert a double fencepost at old_top to prevent consolidation with space we don't own. These fenceposts are artificial chunks that are marked as inuse and are in any case too small to use. We need two to make sizes and alignments work out. */
  if (old_size != 0)
    {
      /*
         Shrink old_top to insert fenceposts, keeping size a
         multiple of MALLOC_ALIGNMENT. We know there is at least
         enough space in old_top to do this.
       */
      old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head (old_top, old_size | PREV_INUSE);

      /*
         Note that the following assignments completely overwrite
         old_top when old_size was previously MINSIZE.  This is
         intentional. We need the fencepost, even if old_top otherwise gets
         lost.
       */
      chunk_at_offset (old_top, old_size)->size = (2 * SIZE_SZ) | PREV_INUSE;
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;