struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;          /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
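To keep the header layout straight while reading the rest of the source, here is a small standalone sketch of the size-field arithmetic. The flag values are the ones glibc uses; the 0x10 user-data offset assumes a typical 64-bit build.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Low three bits of the size field are flag bits (values as used in glibc). */
#define PREV_INUSE     0x1   /* previous adjacent chunk is in use        */
#define IS_MMAPPED     0x2   /* chunk was obtained through mmap          */
#define NON_MAIN_ARENA 0x4   /* chunk lives in a thread (non-main) arena */
#define SIZE_BITS      (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

int main(void)
{
    /* A raw size field as it might appear in memory:
       0x91 = a 0x90-byte chunk plus the PREV_INUSE bit.  */
    uint64_t raw = 0x91;

    printf("chunk size : %#llx\n", (unsigned long long)(raw & ~(uint64_t)SIZE_BITS));
    printf("prev inuse : %llu\n",  (unsigned long long)(raw & PREV_INUSE));

    /* malloc() hands back chunk + 2*sizeof(size_t) (0x10 on 64-bit); the
       user data overlays fd/bk, which is why a freed chunk can recycle the
       first 16 bytes of user data as its list pointers.                   */
    printf("user data offset: %#zx\n", 2 * sizeof(size_t));
    return 0;
}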
struct malloc_state
{
  /* (mutex, flags and the fastbin array precede these fields; omitted in this excerpt) */

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
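For reference while reading _int_malloc below: bins[] stores only the fd/bk pair of each bin header, and bin_at() rewinds the pointer so that pair lines up with the fd/bk fields of a fake struct malloc_chunk. A sketch of the related macros, assuming a 64-bit build and the struct malloc_chunk shown above (definitions written from memory of the glibc source, so treat the exact forms as approximate):

#include <stddef.h>

typedef struct malloc_chunk *mbinptr;   /* as in malloc.c */

#define NBINS             128
#define NSMALLBINS        64
#define SMALLBIN_WIDTH    16                              /* MALLOC_ALIGNMENT on 64-bit */
#define MIN_LARGE_SIZE    (NSMALLBINS * SMALLBIN_WIDTH)   /* 0x400 */

#define in_smallbin_range(sz)  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
#define smallbin_index(sz)     ((unsigned) (sz) >> 4)     /* small bins are 16 bytes apart */

/* bins[] holds only the fd/bk pair of each bin header; bin_at() steps back
   by offsetof(fd) so the pair can be addressed as if it were a chunk.      */
#define bin_at(m, i) \
  ((mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
              - offsetof (struct malloc_chunk, fd)))

#define first(b)  ((b)->fd)   /* one end of the circular list                 */
#define last(b)   ((b)->bk)   /* the end _int_malloc takes chunks from        */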
  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);
static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

  const char *errstr = NULL;
  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size.  Also, checked_request2size traps (returning 0) request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  checked_request2size (bytes, nb);
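A standalone approximation of that conversion (constants below assume a typical 64-bit build; the overflow trap is left out to keep the sketch short):

#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ            (sizeof (size_t))       /* 8 on 64-bit    */
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)           /* 16 on 64-bit   */
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE            (4 * SIZE_SZ)           /* 0x20 on 64-bit */

/* Pad the request with SIZE_SZ bytes of header, round up to the alignment,
   and never go below MINSIZE.                                              */
static size_t request2size (size_t req)
{
  size_t nb = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
  return nb < MINSIZE ? MINSIZE : nb;
}

int main (void)
{
  /* malloc(0x18) fits in a 0x20 chunk because the next chunk's prev_size
     field can be reused for user data; malloc(0x19) needs a 0x30 chunk.  */
  printf ("%#zx %#zx %#zx\n",
          request2size (0x18), request2size (0x19), request2size (0x100));
  return 0;
}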
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }
  for (;; )
    {
      int iters = 0;
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
        {
          bck = victim->bk;
          if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
              || __builtin_expect (victim->size > av->system_mem, 0))
            malloc_printerr (check_action, "malloc(): memory corruption",
                             chunk2mem (victim), av);
          size = chunksize (victim);

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests.  This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */
          /* maintain large bins in sorted order */
          if (fwd != bck)
            {
              /* Or with inuse bit to speed comparisons */
              size |= PREV_INUSE;
              /* if smaller than smallest, bypass loop below */
              assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
              if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
                {
                  fwd = bck;
                  bck = bck->bk;
/* If a large request, scan through the chunks of current bin in sorted order to find smallest that fits. Use the skip list for this. */
      if (!in_smallbin_range (nb))
        {
          bin = bin_at (av, idx);

          /* skip scan if empty or largest chunk is too small */
          if ((victim = first (bin)) != bin &&
              (unsigned long) (victim->size) >= (unsigned long) (nb))
            {
              victim = victim->bk_nextsize;
              while (((unsigned long) (size = chunksize (victim)) <
                      (unsigned long) (nb)))
                victim = victim->bk_nextsize;

              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              if (victim != last (bin) && victim->size == victim->fd->size)
                victim = victim->fd;

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    victim->size |= NON_MAIN_ARENA;
                }
              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);
                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
                  if (__glibc_unlikely (fwd->bk != bck))
                    {
                      errstr = "malloc(): corrupted unsorted chunks";
                      goto errout;
                    }
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
      /*
         Search for a chunk by scanning bins, starting with next largest
         bin.  This search is strictly by best-fit; i.e., the smallest
         (with ties going to approximately the least recently used) chunk
         that fits is selected.

         The bitmap avoids needing to check that most blocks are nonempty.
         The particular case of skipping all bins during warm-up phases
         when no chunks have been returned yet is faster than it might look.
       */
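Before the scan code itself, it helps to know roughly how the binmap is indexed: each 32-bit word of binmap covers 32 bins. The following sketch of the indexing macros is written from memory of typical glibc versions, so treat the exact definitions as approximate:

#define NBINS        128
#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)       /* 32 bins per binmap word */
#define BINMAPSIZE   (NBINS / BITSPERMAP)      /* 4 words                 */

#define idx2block(i) ((i) >> BINMAPSHIFT)                         /* which word */
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))  /* which bit  */

#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))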
      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);
      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }
          /* Advance to bin with set bit.  There must be one.  */
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }

          /* Inspect the bin.  It is likely to be non-empty */
          victim = last (bin);

          /* If a false alarm (empty bin), clear the bit.  */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }
          else
            {
              size = chunksize (victim);

              /*  We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink (av, victim, bck, fwd);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    victim->size |= NON_MAIN_ARENA;
                }
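The unlink step above is a macro, and it is where the classic "corrupted double-linked list" check lives. A simplified sketch of it (the large-bin fd_nextsize/bk_nextsize handling is omitted and the exact form varies between glibc versions):

/* Take chunk P off its doubly linked bin list, verifying that its
   neighbours still point back at it first.                         */
#define unlink(AV, P, BK, FD)                                              \
  {                                                                        \
    FD = P->fd;                                                            \
    BK = P->bk;                                                            \
    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                  \
      malloc_printerr (check_action, "corrupted double-linked list",       \
                       P, AV);                                             \
    else                                                                   \
      {                                                                    \
        FD->bk = BK;                                                       \
        BK->fd = FD;                                                       \
        /* ...large-bin skip-list pointers handled here... */              \
      }                                                                    \
  }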
    use_top:
      /*
         If large enough, split off the chunk bordering the end of memory
         (held in av->top).  Note that this is in accord with the best-fit
         search rule.  In effect, av->top is treated as larger (and thus
         less well fitting) than any other available chunk since it can be
         extended to be as large as necessary (up to system limitations).

         We require that av->top always exists (i.e., has size >= MINSIZE)
         after initialization, so if it would otherwise be exhausted by
         current request, it is replenished.  (The main reason for ensuring
         it exists is that we may need MINSIZE space to put in fenceposts
         in sysmalloc.)
       */
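The split itself is not quoted in this excerpt; roughly, when the top chunk is at least nb + MINSIZE bytes it is cut in two, the front part is returned and the back part becomes the new top. A sketch of that path, written from memory, so take the details as an assumption:

      victim = av->top;
      size = chunksize (victim);

      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (victim, nb);
          av->top = remainder;                   /* back part stays as top */
          set_head (victim, nb | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);

          check_malloced_chunk (av, victim, nb);
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }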
      /* Otherwise (top is too small), consolidate the fastbins and retry.  */
      else if (have_fastchunks (av))
        {
          malloc_consolidate (av);
          /* restore original bin index */
          if (in_smallbin_range (nb))
            idx = smallbin_index (nb);
          else
            idx = largebin_index (nb);
        }
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */

  const char *errstr = NULL;
  int locked = 0;
size = chunksize(p);
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      if (!have_lock && locked)
        (void) mutex_unlock (&av->mutex);
      malloc_printerr (check_action, errstr, chunk2mem (p), av);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    {
      errstr = "free(): invalid size";
      goto errout;
    }
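To see what those two early checks actually reject, here is a tiny standalone model (the helper name passes_free_pointer_checks and the hard-coded 0xf mask and 0x20 minimum are illustrative assumptions for a 64-bit build):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Rough model of the first sanity checks in _int_free(). */
bool passes_free_pointer_checks (uintptr_t chunk, size_t size)
{
  /* chunk + size must not wrap past the top of the address space,
     which is exactly the condition chunk <= (uintptr_t) -size.     */
  if (chunk > (uintptr_t) -size)
    return false;

  /* the chunk address must be 16-byte aligned (64-bit build)       */
  if (chunk & 0xf)
    return false;

  /* the size must be at least MINSIZE (0x20 here) and aligned too  */
  if (size < 0x20 || (size & 0xf))
    return false;

  return true;
}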
      /* Place the chunk in unsorted chunk list. Chunks are
         not placed into regular bins until after they have
         been given one chance to be used in malloc.  */

      bck = unsorted_chunks (av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        {
          errstr = "free(): corrupted unsorted chunks";
          goto errout;
        }
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range (size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;
    if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
      {
        if (have_fastchunks (av))
          malloc_consolidate (av);
        if (av == &main_arena)
          {
#ifndef MORECORE_CANNOT_TRIM
            if ((unsigned long) (chunksize (av->top)) >=
                (unsigned long) (mp_.trim_threshold))
              systrim (mp_.top_pad, av);
#endif
          }
        else
          {
            /* Always try heap_trim(), even if the top chunk is not
               large, because the corresponding heap might go away.  */
            heap_info *heap = heap_for_ptr (top (av));
rut = lambda s: p.recvuntil(s, timeout=0.3)
ru  = lambda s: p.recvuntil(s)
r   = lambda n: p.recv(n)
sl  = lambda s: p.sendline(s)
sls = lambda s: p.sendline(str(s))
ss  = lambda s: p.send(str(s))
s   = lambda s: p.send(s)
uu64 = lambda data: u64(data.ljust(8, '\x00'))
it  = lambda: p.interactive()
b   = lambda: gdb.attach(p)
bp  = lambda bkp: gdb.attach(p, 'b *' + str(bkp))
get_leaked_libc = lambda: u64(ru('\x7f')[-6:].ljust(8, '\x00'))
LOGTOOL = {}

def LOGALL():
    log.success("**** all result ****")
    for i in LOGTOOL.items():
        log.success("%-25s%s" % (i[0] + ":", hex(i[1])))
def get_base(a, text_name):
    text_addr = 0
    libc_base = 0
    for name, addr in a.libs().items():
        if text_name in name:
            text_addr = addr
        elif "libc" in name:
            libc_base = addr
    return text_addr, libc_base

def debug():
    global p
    text_base, libc_base = get_base(p, 'noka')
    script = '''
    set $text_base = {}
    set $libc_base = {}
    b _IO_flush_all_lockp
    b exit
    b _IO_wfile_seekoff
    b _IO_switch_to_wget_mode
    '''.format(text_base, libc_base)
    # b *$rebase(0x1813)
    # b *$rebase(0x18e5)
    # b mprotect
    # b *($text_base+0x0000000000000000F84)
    # b *($text_base+0x000000000000134C)
    # b *($text_base+0x0000000000000000001126)
    # dprintf *($text_base+0x04441),"%c",$ax
    # 0x12D5
    # 0x04441
    # b *($text_base+0x0000000000001671)
    gdb.attach(p, script)