struct malloc_chunk {
  INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;          /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
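Since every chunk size is a multiple of MALLOC_ALIGNMENT, the three low bits of size are free to carry state: PREV_INUSE, IS_MMAPPED and NON_MAIN_ARENA. A stand-alone sketch of how these bits are read, mirroring the corresponding macros in malloc.c (the example value is made up):

#include <stdio.h>
#include <stddef.h>

#define PREV_INUSE     0x1   /* previous adjacent chunk is in use        */
#define IS_MMAPPED     0x2   /* chunk was obtained directly through mmap */
#define NON_MAIN_ARENA 0x4   /* chunk belongs to a non-main arena        */
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

int main (void)
{
  size_t field = 0x91;   /* a 0x90-byte chunk whose previous neighbor is in use */
  printf ("chunksize  = %#zx\n", field & ~(size_t) SIZE_BITS); /* 0x90 */
  printf ("prev_inuse = %zu\n",  field & PREV_INUSE);          /* 1    */
  return 0;
}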
// Layout of a typical (allocated) chunk
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                       |M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Layout of a free chunk
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
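The foot of a free chunk (which doubles as the prev_size field of its physical neighbor) is what makes backward coalescing O(1): free() can step back to the previous chunk without any search. A minimal self-contained sketch of that boundary-tag arithmetic, with next_chunk/prev_chunk written the way glibc defines them:

#include <assert.h>
#include <stddef.h>

typedef struct malloc_chunk { size_t prev_size, size; } *mchunkptr;
#define SIZE_BITS ((size_t) 0x7)
#define next_chunk(p) ((mchunkptr) ((char *) (p) + ((p)->size & ~SIZE_BITS)))
#define prev_chunk(p) ((mchunkptr) ((char *) (p) - (p)->prev_size))

int main (void)
{
  static size_t heap[12] = { 0 };   /* pretend arena memory */
  mchunkptr a = (mchunkptr) heap;
  a->size = 0x20 | 1;               /* 0x20-byte chunk, PREV_INUSE set      */
  mchunkptr b = next_chunk (a);     /* b sits 0x20 bytes after a            */
  b->prev_size = 0x20;              /* the "foot": valid only while a is free */
  assert (prev_chunk (b) == a);
  return 0;
}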
  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;            /* the top chunk at the high end of the heap */

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder; /* what was left over the last time a chunk was
                               split off to satisfy a small request */

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2]; /* list heads for the unsorted bin,
                                    small bins and large bins */

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE]; /* each bit records whether the
                                      corresponding bin holds free chunks */

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;     /* amount currently allocated from the system */
  INTERNAL_SIZE_T max_system_mem;
};
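bins[] is addressed through bin_at(); for small bins the index is simply the chunk size divided by the bin spacing. A stand-alone sketch of 64-bit small-bin indexing (the constants mirror glibc's SMALLBIN_WIDTH == 16; verify against your build):

#include <stdio.h>

static unsigned smallbin_index (unsigned chunk_size)
{
  return chunk_size >> 4;   /* one small bin per 16-byte size step */
}

int main (void)
{
  /* A 0x20-byte chunk (the smallest on 64-bit) lands in bin 2; bin 1 is
     the unsorted bin and bin 0 is unused, which is why bins[] holds
     NBINS * 2 - 2 pointers (an fd and a bk per bin, two slots saved). */
  printf ("index(0x20) = %u\n", smallbin_index (0x20)); /* 2 */
  printf ("index(0x80) = %u\n", smallbin_index (0x80)); /* 8 */
  return 0;
}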
  /* Memory map support */
  int n_mmaps;           /* number of blocks currently allocated with mmap() */
  int n_mmaps_max;       /* maximum number of blocks that may be allocated
                            with mmap() */
  int max_n_mmaps;
  /* the mmap_threshold is dynamic, until the user sets it
     manually, at which point we need to disable any
     dynamic behavior. */
  int no_dyn_threshold;  /* whether dynamic adjustment of the mmap threshold
                            is disabled; 0 (the default) means enabled */

  /* Statistics */
  INTERNAL_SIZE_T mmapped_mem;     /* amount of memory allocated with mmap */
  /*INTERNAL_SIZE_T  sbrked_mem;*/
  /*INTERNAL_SIZE_T  max_sbrked_mem;*/
  INTERNAL_SIZE_T max_mmapped_mem; /* peak of mmapped_mem; usually equal to it */
  INTERNAL_SIZE_T max_total_mem;   /* only kept for NO_THREADS: total memory
                                      allocated by the process in the
                                      single-threaded case */

  /* First address handed out by MORECORE/sbrk.  */
  char *sbrk_base;
};
typedef struct _heap_info
{
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t mprotect_size;    /* Size in bytes that has been mprotected
                              PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
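Because new_heap (shown later) maps every non-main heap at an address aligned to HEAP_MAX_SIZE, the heap_info header, and from it the owning arena, can be recovered from any chunk address by masking off the low bits. This is exactly what arena.c does:

/* From arena.c: mask the low bits of a chunk address to reach the
   heap_info at the start of the HEAP_MAX_SIZE-aligned mapping. */
#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)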
  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }
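alloc_perturb (and its counterpart free_perturb in _int_free) only does work when the perturb byte is set, typically via the MALLOC_PERTURB_ environment variable: allocations are then filled with perturb_byte ^ 0xff and freed memory with perturb_byte, so reads of uninitialized or freed memory show up as a recognizable pattern. A small usage sketch:

/* Run as:  MALLOC_PERTURB_=42 ./a.out
   With the perturb byte set, fresh allocations are pre-filled, so the
   read below prints 0xd5 (42 ^ 0xff) instead of whatever was there. */
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  unsigned char *p = malloc (16);
  printf ("fresh byte: %#x\n", p[0]);
  free (p);   /* free_perturb would now fill the block with 42 */
  return 0;
}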
  for (;; )
    {
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
        {
          // process the unsorted bin
          // allocate from the small bins
        }

      if (!in_smallbin_range (nb))
        {
          bin = bin_at (av, idx);
          /* skip scan if empty or largest chunk is too small */
          if ((victim = first (bin)) != bin
              && (unsigned long) (victim->size) >= (unsigned long) (nb))
            {
              victim = victim->bk_nextsize;
              while (((unsigned long) (size = chunksize (victim)) <
                      (unsigned long) (nb)))
                victim = victim->bk_nextsize;

              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              if (victim != last (bin)
                  && victim->size == victim->fd->size)
                victim = victim->fd;
  for (;; )
    {
      // process the unsorted bin, allocate from the small bins
      // handle the case where the large bin matching the requested size
      // already contains a chunk that satisfies the request

      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);
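The binmap packs one bit per bin, 32 to a word, so locating the word and bit for a bin index is pure shift-and-mask work. A stand-alone sketch of idx2block/idx2bit (BINMAPSHIFT is 5 in glibc):

#include <stdio.h>

#define BINMAPSHIFT 5
#define idx2block(i) ((i) >> BINMAPSHIFT)                  /* which 32-bit word */
#define idx2bit(i)   (1U << ((i) & ((1U << BINMAPSHIFT) - 1))) /* which bit in it */

int main (void)
{
  unsigned idx = 70;   /* some large-bin index */
  printf ("block = %u, bit = %#x\n", idx2block (idx), idx2bit (idx));
  /* block = 2, bit = 1 << 6 = 0x40 */
  return 0;
}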
      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          // advance through the binmap to a block that holds a free chunk
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }

          /* Advance to bin with set bit. There must be one. */
          // within this block, find the head of the first bin that can
          // satisfy the request
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }
          /* Inspect the bin. It is likely to be non-empty */
          victim = last (bin);

          /* If a false alarm (empty bin), clear the bit. */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }
          else
            {
              size = chunksize (victim);

              /* We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink (av, victim, bck, fwd);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  // ... same as the previous block: handle the case where
                  // the chunk cannot be split
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    victim->size |= NON_MAIN_ARENA;
                }
  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  // uintptr_t is unsigned long int
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      if (!have_lock && locked)
        (void) mutex_unlock (&av->mutex);
      malloc_printerr (check_action, errstr, chunk2mem (p), av);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    {
      errstr = "free(): invalid size";
      goto errout;
    }
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())

#if TRIM_FASTBINS
      /* If TRIM_FASTBINS set, don't place chunks
         bordering top into fastbins */
      // i.e. with TRIM_FASTBINS set, a chunk adjacent to the top chunk is
      // not placed in a fast bin, so it gets consolidated with top instead
      && (chunk_at_offset (p, size) != av->top)
#endif
      )
    {
      if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
          || __builtin_expect (chunksize (chunk_at_offset (p, size))
                               >= av->system_mem, 0))
        {
          /* We might not have a lock at this point and concurrent
             modifications of system_mem might have led to a false positive.
             Redo the test after getting the lock.  */
          if (have_lock
              || ({ assert (locked == 0);
                    mutex_lock (&av->mutex);
                    locked = 1;
                    chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
                      || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
                  }))
            {
              errstr = "free(): invalid next size (fast)";
              goto errout;
            }
          if (! have_lock)
            {
              (void) mutex_unlock (&av->mutex);
              locked = 0;
            }
        }
      //...
    }
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ()) /* #if..#endif omitted */)
    {
      // validate the size
      // set the flag bits and fetch the head pointer of the matching fast bin

      do
        {
          /* Check that the top of the bin is not the record we are going to
             add (i.e., double free).  */
          if (__builtin_expect (old == p, 0))
            {
              errstr = "double free or corruption (fasttop)";
              goto errout;
            }
          /* Check that size of fastbin chunk at the top is the same as
             size of the chunk that we are adding.  We can dereference OLD
             only if we have the lock, otherwise it might have already been
             deallocated.  See use of OLD_IDX below for the actual check.  */
          if (have_lock && old != NULL)
            old_idx = fastbin_index (chunksize (old));
          p->fd = old2 = old;
        }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
             != old2);
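Note how narrow the fasttop check is: it only compares the chunk being freed against the current head of its fast bin. A hedged demo of what it does and does not catch, assuming the pre-tcache glibc walked through here (newer glibc aborts in the tcache code instead):

/* On the glibc version discussed here (no tcache), the second free below
   aborts with "double free or corruption (fasttop)". */
#include <stdlib.h>

int main (void)
{
  char *a = malloc (0x20);   /* fastbin-sized request */
  free (a);
  free (a);                  /* caught: a is still the fast-bin top */

  /* Not reached; but a pattern like free(a); free(b); free(a); would
     slip past this particular check, since b is the top at that point. */
  return 0;
}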
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ()) /* #if..#endif omitted */)
    {
      // chunks no larger than the fast bin limit are pushed onto a fast bin
    }
  else if (!chunk_is_mmapped (p))
    {
      if (! have_lock)
        {
          (void) mutex_lock (&av->mutex);
          locked = 1;
        }

      nextchunk = chunk_at_offset (p, size);
      /* Lightweight tests: check whether the block is already the
         top block.  */
      if (__glibc_unlikely (p == av->top))
        {
          errstr = "double free or corruption (top)";
          goto errout;
        }
      /* Or whether the next chunk is beyond the boundaries of the arena.  */
      if (__builtin_expect (contiguous (av)
                            && (char *) nextchunk
                            >= ((char *) av->top + chunksize (av->top)), 0))
        {
          errstr = "double free or corruption (out)";
          goto errout;
        }
      /* Or whether the block is actually not marked used.  */
      if (__glibc_unlikely (!prev_inuse (nextchunk)))
        {
          errstr = "double free or corruption (!prev)";
          goto errout;
        }
      /*
         Place the chunk in unsorted chunk list. Chunks are
         not placed into regular bins until after they have
         been given one chance to be used in malloc.
       */
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ()) /* #if..#endif omitted */)
    {
      // chunks no larger than the fast bin limit are pushed onto a fast bin
    }
  else if (!chunk_is_mmapped (p))
    {
      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
      // validity checks
      // coalesce with the physically adjacent previous chunk
      // insert the (possibly merged) chunk into the unsorted bin

      if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
        {
          if (have_fastchunks (av))
            malloc_consolidate (av);
          if (av == &main_arena)
            {
#ifndef MORECORE_CANNOT_TRIM
              if ((unsigned long) (chunksize (av->top)) >=
                  (unsigned long) (mp_.trim_threshold))
                systrim (mp_.top_pad, av);
#endif
            }
          else
            {
              /* Always try heap_trim(), even if the top chunk is not
                 large, because the corresponding heap might go away.  */
              heap_info *heap = heap_for_ptr (top (av));
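Both trim parameters consulted here are ordinary mallopt knobs, so the point at which free() hands memory back to the kernel can be tuned from application code. A usage sketch (the extra M_MMAP_THRESHOLD call keeps the large request on the heap rather than mmap, so systrim is actually the code path exercised):

#include <malloc.h>
#include <stdlib.h>

int main (void)
{
  mallopt (M_MMAP_THRESHOLD, 8 * 1024 * 1024); /* keep the request on the heap */
  mallopt (M_TRIM_THRESHOLD, 1024 * 1024);     /* trim once top exceeds 1 MiB  */

  void *p = malloc (4 * 1024 * 1024);
  free (p);  /* top chunk now far above trim_threshold -> systrim shrinks it */
  return 0;
}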
  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return __libc_malloc (bytes);

  /* chunk corresponding to oldmem */
  const mchunkptr oldp = mem2chunk (oldmem);
  /* its size */
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
      malloc_printerr (check_action, "realloc(): invalid pointer",
                       oldmem, ar_ptr);
      return NULL;
    }
  /* Avoid copy if newp is next chunk after oldp. */
  if (newp == next)
    {
      newsize += oldsize;
      newp = oldp;
    }
  else
    {
      // this is the ordinary expansion path: malloc a chunk of the
      // requested size, copy the data over, then free the old chunk

      /* Unroll copy of <= 36 bytes (72 if 8byte sizes)
         We know that contents have an odd number of
         INTERNAL_SIZE_T-sized words; minimally 3.  */
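Whether the in-place path was taken is observable from user code by comparing the returned pointer; a small hedged demo (the outcome depends on heap layout, so either message may print):

/* Watch whether realloc could grow in place (e.g. merged with the
   following free/top chunk) or had to move-and-copy.  Comparing the old
   pointer value is enough for a demo, though it is indeterminate after
   a move in strict C terms. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main (void)
{
  char *p = malloc (100);
  strcpy (p, "hello");
  char *q = realloc (p, 200);   /* p must not be dereferenced after this */
  printf ("%s in place\n", q == p ? "grew" : "did not grow");
  printf ("data preserved: %s\n", q);  /* contents are copied if moved */
  free (q);
  return 0;
}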
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  // parameter declarations

  /*
     If have mmap, and the request size meets the mmap threshold, and
     the system supports mmap, and there are few enough currently
     allocated mmapped regions, try to directly map this request
     rather than expanding top.
   */
  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      char *mm;           /* return value from mmap call */
    try_mmap:
      /* Round up size to nearest page.  For mmapped chunks, the overhead
         is one SIZE_SZ unit larger than for normal chunks, because there
         is no following chunk whose prev_size field could be used.

         See the front_misalign handling below, for glibc there is no
         need for further alignments unless we have high alignment.  */
      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
      else
        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
      tried_mmap = true;

      /* Don't try if size wraps around 0 */
      if ((unsigned long) (size) > (unsigned long) (nb))
        {
          mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
          if (mm != MAP_FAILED)
            {
              /*
                 The offset to the start of the mmapped region is stored
                 in the prev_size field of the chunk. This allows us to adjust
                 returned start address to meet alignment requirements here
                 and in memalign(), and still be able to compute proper
                 address argument for later munmap in free() and realloc().
               */

              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                {
                  /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
                     MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
                     aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
                  front_misalign = 0;
                }
              else
                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  correction = MALLOC_ALIGNMENT - front_misalign;
                  p = (mchunkptr) (mm + correction);
                  p->prev_size = correction;
                  set_head (p, (size - correction) | IS_MMAPPED);
                }
              else
                {
                  p = (mchunkptr) mm;
                  set_head (p, size | IS_MMAPPED);
                }
/* update statistics */
              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
              atomic_max (&mp_.max_n_mmaps, new);
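Whether a request takes this mmap path is governed by mp_.mmap_threshold, which mallopt can pin (doing so also sets no_dyn_threshold). A sketch for observing the path, e.g. under strace:

/* Force every allocation of at least 4 KiB through the sysmalloc mmap
   path; run under `strace -e mmap,munmap ./a.out` to watch it happen. */
#include <malloc.h>
#include <stdlib.h>

int main (void)
{
  mallopt (M_MMAP_THRESHOLD, 4096); /* also disables the dynamic threshold */
  void *p = malloc (64 * 1024);     /* served by mmap, IS_MMAPPED set      */
  free (p);                         /* munmapped immediately                */
  return 0;
}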
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  // parameter declarations

  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      // handle the case where mmap can be used
    }

  if (av == NULL)
    return 0;

  // sanity checks on the top chunk and the request size

  if (av != &main_arena)
    {
      // fetch parameters
      if ((long) (MINSIZE + nb - old_size) > 0
          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
        {
          // the heap was extended in place, past the high end of top
        }
      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
        {
          /* Use a newly allocated heap.  */
          heap->ar_ptr = av;
          heap->prev = old_heap;
          av->system_mem += heap->size;
          arena_mem += heap->size;
          /* Set up the new top.  */
          top (av) = chunk_at_offset (heap, sizeof (*heap));
          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
          /* Setup fencepost and free the old top chunk with a multiple of
             MALLOC_ALIGNMENT in size. */
          /* The fencepost takes at least MINSIZE bytes, because it might
             become the top chunk again later.  Note that a footer is set
             up, too, although the chunk is marked in use. */
          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
                    0 | PREV_INUSE);
          if (old_size >= MINSIZE)
            {
              set_head (chunk_at_offset (old_top, old_size),
                        (2 * SIZE_SZ) | PREV_INUSE);
              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
              _int_free (av, old_top, 1);
            }
          else
            {
              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
              set_foot (old_top, (old_size + 2 * SIZE_SZ));
            }
        }
      else if (!tried_mmap)
        /* We can at least try to use mmap memory.  */
        goto try_mmap;
    }
  //...
}
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;
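The technique is generic: over-map twice the required size with PROT_NONE, round the start up to the alignment boundary, then unmap the slack on both sides. A stand-alone sketch of the same trick outside glibc (mmap_aligned is a made-up helper name; 64 MiB stands in for the 64-bit HEAP_MAX_SIZE):

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Obtain `size` bytes of address space aligned to `size` (a power of two). */
static void *mmap_aligned (size_t size)
{
  char *p1 = mmap (NULL, size << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  char *p2 = (char *) (((uintptr_t) p1 + (size - 1)) & ~(size - 1));
  if (p2 != p1)
    munmap (p1, p2 - p1);                              /* trim the front */
  munmap (p2 + size, (p1 + (size << 1)) - (p2 + size)); /* trim the back */
  return p2;
}

int main (void)
{
  size_t sz = 64 * 1024 * 1024;
  void *h = mmap_aligned (sz);
  printf ("%p (aligned: %s)\n", h,
          ((uintptr_t) h & (sz - 1)) == 0 ? "yes" : "no");
  return 0;
}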
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  // parameter declarations

  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      // handle the case where mmap can be used
    }

  if (av == NULL)
    return 0;

  // sanity checks on the top chunk and the request size

  if (av != &main_arena)
    {
      // handle the case where the current arena is not the main arena
    }
  else /* av == main_arena */
    { /* Request enough space for nb + pad + overhead */
      size = nb + mp_.top_pad + MINSIZE;

      /* If contiguous, we can subtract out existing space that we hope to
         combine with new space. We add it back later only if
         we don't actually get contiguous space.  */
      if (contiguous (av))
        size -= old_size;

      /* Round to a multiple of page size.
         If MORECORE is not contiguous, this ensures that we only call it
         with whole-page arguments.  And if MORECORE is contiguous and
         this is not first time through, this preserves page-alignment of
         previous calls. Otherwise, we correct to page-align below.  */
      size = ALIGN_UP (size, pagesize);

      /* Don't try to call MORECORE if argument is so big as to appear
         negative. Note that since mmap takes size_t arg, it may succeed
         below even if we cannot call MORECORE.  */
      if (size > 0)
        {
          brk = (char *) (MORECORE (size));
          LIBC_PROBE (memory_sbrk_more, 2, brk, size);
        }

      if (brk != (char *) (MORECORE_FAILURE))
        {
          /* Call the `morecore' hook if necessary.  */
          void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
          if (__builtin_expect (hook != NULL, 0))
            (*hook)();
        }
      else
        {
          /*
             If have mmap, try using it as a backup when MORECORE fails or
             cannot be used. This is worth doing on systems that have "holes" in
             address space, so sbrk cannot extend to give contiguous space, but
             space is available elsewhere.  Note that we ignore mmap max count
             and threshold limits, since the space will not be used as a
             segregated mmap region.
           */
          /* Cannot merge with old top, so add its size back in */
          if (contiguous (av))
            size = ALIGN_UP (size + old_size, pagesize);

          /* If we are relying on mmap as backup, then use larger units */
          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
            size = MMAP_AS_MORECORE_SIZE;

          /* Don't try if size wraps around 0 */
          if ((unsigned long) (size) > (unsigned long) (nb))
            {
              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
              if (mbrk != MAP_FAILED)
                {
                  /* We do not need, and cannot use, another sbrk call to find end */
                  brk = mbrk;
                  snd_brk = brk + size;

                  /*
                     Record that we no longer have a contiguous sbrk region.
                     After the first time mmap is used as backup, we do not
                     ever rely on contiguous space since this could incorrectly
                     bridge regions.
                   */
                  set_noncontiguous (av);
                }
            }
        }
      //...
    }
  //...
}
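For the main arena the program break really does move under these MORECORE calls, which is easy to watch from user code with sbrk(0); a small demo (assumes the request stays below the mmap threshold, so it is served from the heap):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main (void)
{
  void *before = sbrk (0);
  void *p = malloc (64 * 1024);   /* below the 128 KiB default threshold */
  void *after = sbrk (0);
  printf ("break moved by %ld bytes\n",
          (long) ((char *) after - (char *) before));
  free (p);
  return 0;
}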
      /*
         Otherwise, make adjustments:

       * If the first time through or noncontiguous, we need to call sbrk
          just to find out where the end of memory lies.

       * We need to ensure that all returned chunks from malloc will meet
          MALLOC_ALIGNMENT

       * If there was an intervening foreign sbrk, we need to adjust sbrk
          request size to account for fact that we will not be able to
          combine new space with existing space in old_top.

       * Almost all systems internally allocate whole pages at a time, in
          which case we might as well use the whole last page of request.
          So we allocate enough more memory to hit a page boundary now,
          which in turn causes future contiguous calls to page-align.
       */
      /* handle contiguous cases */
      if (contiguous (av))
        {
          /* Count foreign sbrk as system_mem.  */
          if (old_size)
            av->system_mem += brk - old_end;

          /* Guarantee alignment of first new chunk made from this space */
          front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
          if (front_misalign > 0)
            {
              /*
                 Skip over some bytes to arrive at an aligned position.
                 We don't need to specially mark these wasted front bytes.
                 They will never be accessed anyway because
                 prev_inuse of av->top (and any chunk created from its start)
                 is always true after initialization.
               */
          /*
             If can't allocate correction, try to at least find out current
             brk.  It might be enough to proceed without failing.

             Note that if second sbrk did NOT fail, we assume that space
             is contiguous with first sbrk. This is a safe assumption unless
             program is multithreaded but doesn't use locks and a foreign sbrk
             occurred between our first and second calls.
           */
      /* handle non-contiguous cases */
      else
        {
          if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
            /* MORECORE/mmap must correctly align */
            assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
          else
            {
              front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  /*
                     Skip over some bytes to arrive at an aligned position.
                     We don't need to specially mark these wasted front bytes.
                     They will never be accessed anyway because
                     prev_inuse of av->top (and any chunk created from its start)
                     is always true after initialization.
                   */

          /* Find out current end of memory */
          if (snd_brk == (char *) (MORECORE_FAILURE))
            {
              snd_brk = (char *) (MORECORE (0));
            }
        }

      // at this point aligned_brk holds the aligned start address of the
      // newly obtained memory

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char *) (MORECORE_FAILURE))
        {
          av->top = (mchunkptr) aligned_brk;
          set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
          av->system_mem += correction;
          /*
             If not the first time through, we either have a
             gap due to foreign sbrk or a non-contiguous region.  Insert a
             double fencepost at old_top to prevent consolidation with space
             we don't own. These fenceposts are artificial chunks that are
             marked as inuse and are in any case too small to use. We need
             two to make sizes and alignments work out.
           */
          if (old_size != 0)
            {
              /*
                 Shrink old_top to insert fenceposts, keeping size a
                 multiple of MALLOC_ALIGNMENT. We know there is at least
                 enough space in old_top to do this.
               */
              old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
              set_head (old_top, old_size | PREV_INUSE);

              /*
                 Note that the following assignments completely overwrite
                 old_top when old_size was previously MINSIZE.  This is
                 intentional. We need the fencepost, even if old_top otherwise
                 gets lost.
               */
              chunk_at_offset (old_top, old_size)->size =
                (2 * SIZE_SZ) | PREV_INUSE;
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  // this ensures that only the main arena uses sbrk for contiguous memory:
  // if this copy of glibc is not in the default namespace, or the program
  // is statically linked and pulled in ptmalloc_init via dlopen, glibc must
  // not allocate with sbrk (__morecore is redirected)
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;
  while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
    {
      // find the position of the first '=' in the entry
      size_t len = strcspn (envline, "=");

      if (envline[len] != '=')
        /* This is a "MALLOC_" variable at the end of the string
           without a '=' character.  Ignore it since otherwise we
           will access invalid memory below.  */
        continue;
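The entries matched here are the classic MALLOC_* tunables (MALLOC_TOP_PAD_, MALLOC_MMAP_THRESHOLD_, MALLOC_ARENA_MAX, MALLOC_CHECK_, and so on), so allocator behavior can be adjusted without recompiling; for example (./app is a stand-in for any program):

# pin the mmap threshold and restrict the process to a single arena
MALLOC_MMAP_THRESHOLD_=65536 MALLOC_ARENA_MAX=1 ./app

# abort with a diagnostic when heap corruption is detected
MALLOC_CHECK_=3 ./app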