Rizin: UNIX-like reverse engineering framework and CLI tools
malloc.c
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
void *__mremap(void *, size_t, size_t, int, ...);
int __madvise(void *, size_t, int);

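/* Every allocation carries a two-word header: psize is the size of the
 * previous chunk and csize the size of this one, each with the low bit
 * (C_INUSE) set while the chunk is in use.  The next/prev links are only
 * meaningful while the chunk sits in a bin's free list.  Chunks obtained
 * directly from mmap are recognized by a clear C_INUSE bit in csize, and
 * their psize instead records the offset back to the start of the mapping. */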
struct chunk {
	size_t psize, csize;
	struct chunk *next, *prev;
};

struct bin {
	volatile int lock[2];
	struct chunk *head;
	struct chunk *tail;
};

static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int free_lock[2];
} mal;


#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2*sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#define DONTCARE 16
#define RECLAIM 163840

#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))

#define C_INUSE ((size_t)1)

#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))


/* Synchronization tools */

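/* Each lock is a pair of ints: lk[0] is the lock word and lk[1] is a
 * waiters flag.  Locking spins on an atomic swap and sleeps via __wait;
 * unlock stores 0 and wakes one waiter.  In single-threaded programs
 * (libc.threads_minus_1 == 0) locking is skipped entirely. */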
static inline void lock(volatile int *lk)
{
	if (libc.threads_minus_1)
		while (a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

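/* Free chunks live in 64 size-segregated bins, each a circular doubly
 * linked list whose sentinel overlays the bin's head/tail fields (see
 * BIN_TO_CHUNK).  A set bit in binmap means the corresponding bin is
 * probably non-empty.  Small sizes map one-to-one onto bins 0-32; larger
 * sizes are grouped via bin_tab, with bin 63 holding the largest.
 * bin_index rounds a chunk size down to its bin; bin_index_up rounds a
 * request up, so every chunk in that bin or above is large enough. */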
static const unsigned char bin_tab[60] = {
	32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(c->csize),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

void *__expand_heap(size_t *);

static struct chunk *expand_heap(size_t n)
{
	static int heap_lock[2];
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	lock(heap_lock);

	p = __expand_heap(&n);
	if (!p) {
		unlock(heap_lock);
		return 0;
	}

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	unlock(heap_lock);

	return w;
}

static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

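/* alloc_fwd(c) claims c itself if it is free; alloc_rev(c) claims the
 * chunk preceding c.  Both re-check the recorded size after taking the
 * bin lock, since another thread may have claimed or coalesced the chunk
 * in the meantime.  Callers use them to grab free neighbors for merging. */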
static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}

static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	free(CHUNK_TO_MEM(split));
}

void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

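	/* Binned allocation: find the lowest non-empty bin that can
	 * satisfy the request.  If no bin qualifies, grow the heap and
	 * merge the new space with a free chunk preceding it, if any;
	 * otherwise take the first chunk from the chosen bin, splitting
	 * it in place (pretrim) when the remainder would stay in the
	 * same bin. */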
	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}

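/* __malloc0 is the zero-filling allocation path (in musl, the calloc
 * backend).  Heap chunks may contain stale data and are cleared a word
 * at a time, skipping words that are already zero so untouched pages stay
 * clean; mmapped chunks are fresh anonymous pages and are already zero. */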
void *__malloc0(size_t n)
{
	void *p = malloc(n);
	if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
		size_t *z;
		n = (n + sizeof *z - 1)/sizeof *z;
		for (z=p; n; n--, z++) if (*z) *z=0;
	}
	return p;
}

void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n))) {
			memcpy(new, p, n-OVERHEAD);
			free(p);
			return new;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

void free(void *p)
{
	struct chunk *self = MEM_TO_CHUNK(p);
	struct chunk *next;
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	if (!p) return;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t len = CHUNK_SIZE(self) + extra;
		/* Crash on double free */
		if (extra & 1) a_crash();
		__munmap(base, len);
		return;
	}

	final_size = new_size = CHUNK_SIZE(self);
	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

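	/* Coalesce with free neighbors until both adjacent chunks are in
	 * use, then take the destination bin's lock and the global
	 * free_lock and re-verify before publishing the merged chunk. */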
	for (;;) {
		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	/* Replace middle of large chunks with fresh zero pages */
	if (reclaim) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
	}

	unlock_bin(i);
}