linux_heap_glibc.c
1 // SPDX-FileCopyrightText: 2016-2020 n4x0r <kalianon2816@gmail.com>
2 // SPDX-FileCopyrightText: 2016-2020 soez <soez@amn3s1a.com>
3 // SPDX-FileCopyrightText: 2016-2020 pancake <pancake@nopcode.org>
4 // SPDX-License-Identifier: LGPL-3.0-only
5 
6 #include <rz_core.h>
7 #include <rz_config.h>
8 #include <rz_types.h>
9 #include <math.h>
10 
11 #include "core_private.h"
12 
13 #ifdef HEAP64
14 #include "linux_heap_glibc64.h"
15 #else
16 #include "linux_heap_glibc.h"
17 #endif
18 
30 static GHT GH(get_va_symbol)(RzCore *core, const char *path, const char *sym_name) {
31  GHT vaddr = GHT_MAX;
32  RzBin *bin = core->bin;
33  RzBinFile *current_bf = rz_bin_cur(bin);
34  RzListIter *iter;
35  RzBinSymbol *s;
36 
37  RzBinOptions opt;
38  rz_bin_options_init(&opt, -1, 0, 0, false);
39  opt.obj_opts.elf_load_sections = rz_config_get_b(core->config, "elf.load.sections");
40  opt.obj_opts.elf_checks_sections = rz_config_get_b(core->config, "elf.checks.sections");
41  opt.obj_opts.elf_checks_segments = rz_config_get_b(core->config, "elf.checks.segments");
42 
43  RzBinFile *libc_bf = rz_bin_open(bin, path, &opt);
44  if (!libc_bf) {
45  return vaddr;
46  }
47 
48  RzList *syms = rz_bin_get_symbols(bin);
49  rz_list_foreach (syms, iter, s) {
50  if (!strcmp(s->name, sym_name)) {
51  vaddr = s->vaddr;
52  break;
53  }
54  }
55 
56  rz_bin_file_delete(bin, libc_bf);
57  rz_bin_file_set_cur_binfile(bin, current_bf);
58  return vaddr;
59 }
60 
61 static inline GHT GH(align_address_to_size)(ut64 addr, ut64 align) {
62  return addr + ((align - (addr % align)) % align);
63 }
64 
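// Note: since glibc 2.32, singly-linked list pointers (tcache/fastbin fd) are
// "safe-linked": the stored value is the real pointer XORed with the address of
// the list slot shifted right by 12 bits. get_next_pointer() below undoes that
// mangling for glibc >= 2.32 and returns the raw pointer for older versions.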
65 static inline GHT GH(get_next_pointer)(RzCore *core, GHT pos, GHT next) {
66  return (core->dbg->glibc_version < 232) ? next : (GHT)((pos >> 12) ^ next);
67 }
68 
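// Resolve main_arena from libc debug symbols: prefer the `main_arena` symbol
// directly; if only `__malloc_hook` is available, derive main_arena from it
// using the arch-specific layout (just after the hook on x86, just before it
// on arm).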
70 static GHT GH(get_main_arena_with_symbol)(RzCore *core, RzDebugMap *map) {
71  GHT base_addr = map->addr;
72  rz_return_val_if_fail(base_addr != GHT_MAX, GHT_MAX);
73 
74  GHT main_arena = GHT_MAX;
75  GHT vaddr = GHT_MAX;
76  char *path = strdup(map->name);
77  if (path && rz_file_exists(path)) {
78  vaddr = GH(get_va_symbol)(core, path, "main_arena");
79  if (vaddr != GHT_MAX) {
80  main_arena = base_addr + vaddr;
81  } else {
82  vaddr = GH(get_va_symbol)(core, path, "__malloc_hook");
83  if (vaddr == GHT_MAX) {
84  return main_arena;
85  }
86  RzBinInfo *info = rz_bin_get_info(core->bin);
87  if (!strcmp(info->arch, "x86")) {
88  main_arena = GH(align_address_to_size)(vaddr + base_addr + sizeof(GHT), 0x20);
89  } else if (!strcmp(info->arch, "arm")) {
90  main_arena = vaddr + base_addr - sizeof(GHT) * 2 - sizeof(MallocState);
91  }
92  }
93  }
94  free(path);
95  return main_arena;
96 }
97 
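// Detect tcache support by parsing the version out of the mapped "libc-X.Y.so"
// path, e.g. "libc-2.31.so" -> glibc_version = 231. tcache was introduced in
// glibc 2.26, hence the (v > 2.25) check below.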
98 static bool GH(is_tcache)(RzCore *core) {
99  // NOTE This method of resolving libc fails in the following cases:
100  // 1. the libc shared object file name does not contain a version number
101  // 2. another map has `libc-` in its absolute path
102  char *fp = NULL;
103  double v = 0;
104  if (rz_config_get_b(core->config, "cfg.debug")) {
105  RzDebugMap *map;
106  RzListIter *iter;
107  rz_debug_map_sync(core->dbg);
108  rz_list_foreach (core->dbg->maps, iter, map) {
110  // Skip the target binary itself, in case its own name contains *libc-*
110  if (strncmp(map->name, core->bin->file, strlen(map->name)) != 0) {
111  fp = strstr(map->name, "libc-");
112  if (fp) {
113  break;
114  }
115  }
116  }
117  } else {
118  int tcv = rz_config_get_i(core->config, "dbg.glibc.tcache");
119  eprintf("dbg.glibc.tcache = %i\n", tcv);
120  return tcv != 0;
121  }
122  if (fp) {
123 
124  // If the path contains `libc-` more than once, the actual libc is the last occurrence
125  while (strstr(fp + 1, "libc-") != NULL) {
126  fp = strstr(fp + 1, "libc-");
127  }
128 
129  v = rz_num_get_float(NULL, fp + 5);
130  core->dbg->glibc_version = (int)round((v * 100));
131  }
132  return (v > 2.25);
133 }
134 
135 static GHT GH(tcache_chunk_size)(RzCore *core, GHT brk_start) {
136  GHT sz = 0;
137 
138  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
139  if (!cnk) {
140  return sz;
141  }
142  rz_io_read_at(core->io, brk_start, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
143  sz = (cnk->size >> 3) << 3; // clear chunk flag
144  return sz;
145 }
146 
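// The two helpers below copy a raw, target-layout malloc_state into the
// version-independent MallocState: update_arena_with_tc() handles the
// tcache-era layout (glibc >= 2.26, with have_fast_chunks/attached_threads),
// update_arena_without_tc() the older layout.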
147 static void GH(update_arena_with_tc)(GH(RzHeap_MallocState_tcache) * cmain_arena, MallocState *main_arena) {
148  int i = 0;
149  main_arena->mutex = cmain_arena->mutex;
150  main_arena->flags = cmain_arena->flags;
151  for (i = 0; i < BINMAPSIZE; i++) {
152  main_arena->binmap[i] = cmain_arena->binmap[i];
153  }
154  main_arena->have_fast_chunks = cmain_arena->have_fast_chunks;
155  main_arena->attached_threads = cmain_arena->attached_threads;
156  for (i = 0; i < NFASTBINS; i++) {
157  main_arena->fastbinsY[i] = cmain_arena->fastbinsY[i];
158  }
159  main_arena->top = cmain_arena->top;
160  main_arena->last_remainder = cmain_arena->last_remainder;
161  for (i = 0; i < NBINS * 2 - 2; i++) {
162  main_arena->bins[i] = cmain_arena->bins[i];
163  }
164  main_arena->next = cmain_arena->next;
165  main_arena->next_free = cmain_arena->next_free;
166  main_arena->system_mem = cmain_arena->system_mem;
167  main_arena->max_system_mem = cmain_arena->max_system_mem;
168 }
169 
170 static void GH(update_arena_without_tc)(GH(RzHeap_MallocState) * cmain_arena, MallocState *main_arena) {
171  int i = 0;
172  main_arena->mutex = cmain_arena->mutex;
173  main_arena->flags = cmain_arena->flags;
174  for (i = 0; i < BINMAPSIZE; i++) {
175  main_arena->binmap[i] = cmain_arena->binmap[i];
176  }
177  main_arena->attached_threads = 1;
178  for (i = 0; i < NFASTBINS; i++) {
179  main_arena->fastbinsY[i] = cmain_arena->fastbinsY[i];
180  }
181  main_arena->top = cmain_arena->top;
182  main_arena->last_remainder = cmain_arena->last_remainder;
183  for (i = 0; i < NBINS * 2 - 2; i++) {
184  main_arena->bins[i] = cmain_arena->bins[i];
185  }
186  main_arena->next = cmain_arena->next;
187  main_arena->next_free = cmain_arena->next_free;
188  main_arena->system_mem = cmain_arena->system_mem;
189  main_arena->max_system_mem = cmain_arena->max_system_mem;
190 }
191 
199 RZ_API bool GH(rz_heap_update_main_arena)(RzCore *core, GHT m_arena, MallocState *main_arena) {
200  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
201  if (tcache) {
202  GH(RzHeap_MallocState_tcache) *cmain_arena = RZ_NEW0(GH(RzHeap_MallocState_tcache));
203  if (!cmain_arena) {
204  return false;
205  }
206  (void)rz_io_read_at(core->io, m_arena, (ut8 *)cmain_arena, sizeof(GH(RzHeap_MallocState_tcache)));
207  GH(update_arena_with_tc)
208  (cmain_arena, main_arena);
209  } else {
210  GH(RzHeap_MallocState) *cmain_arena = RZ_NEW0(GH(RzHeap_MallocState));
211  if (!cmain_arena) {
212  return false;
213  }
214  (void)rz_io_read_at(core->io, m_arena, (ut8 *)cmain_arena, sizeof(GH(RzHeap_MallocState)));
215  GH(update_arena_without_tc)
216  (cmain_arena, main_arena);
217  }
218  return true;
219 }
220 
221 static void GH(get_brks)(RzCore *core, GHT *brk_start, GHT *brk_end) {
222  if (rz_config_get_b(core->config, "cfg.debug")) {
223  RzListIter *iter;
224  RzDebugMap *map;
225  rz_debug_map_sync(core->dbg);
226  rz_list_foreach (core->dbg->maps, iter, map) {
227  if (map->name) {
228  if (strstr(map->name, "[heap]")) {
229  *brk_start = map->addr;
230  *brk_end = map->addr_end;
231  break;
232  }
233  }
234  }
235  } else {
236  void **it;
237  RzPVector *maps = rz_io_maps(core->io);
238  rz_pvector_foreach (maps, it) {
239  RzIOMap *map = *it;
240  if (map->name) {
241  if (strstr(map->name, "[heap]")) {
242  *brk_start = map->itv.addr;
243  *brk_end = map->itv.addr + map->itv.size;
244  break;
245  }
246  }
247  }
248  }
249 }
250 
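// Pretty-print (or emit flags for) the malloc_state at m_arena. `align` is the
// offset added to the arena base when computing each bin's fd/bk address
// (m_arena + align + SZ * i - SZ * 2); it differs between tcache-era and
// pre-tcache glibc because the leading malloc_state members differ.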
251 static void GH(print_arena_stats)(RzCore *core, GHT m_arena, MallocState *main_arena, GHT global_max_fast, int format) {
252  size_t i, j, k, start;
253  GHT align = 12 * SZ + sizeof(int) * 2;
254  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
256 
257  if (tcache) {
258  align = 16;
259  }
260 
261  GHT apart[NSMALLBINS + 1] = { 0LL };
262  if (format == RZ_OUTPUT_MODE_RIZIN) {
263  for (i = 0; i < NBINS * 2 - 2; i += 2) {
264  GHT addr = m_arena + align + SZ * i - SZ * 2;
265  GHT bina = main_arena->bins[i];
266  rz_cons_printf("f chunk.%zu.bin @ 0x%" PFMT64x "\n", i, (ut64)addr);
267  rz_cons_printf("f chunk.%zu.fd @ 0x%" PFMT64x "\n", i, (ut64)bina);
268  bina = main_arena->bins[i + 1];
269  rz_cons_printf("f chunk.%zu.bk @ 0x%" PFMT64x "\n", i, (ut64)bina);
270  }
271  for (i = 0; i < BINMAPSIZE; i++) {
272  rz_cons_printf("f binmap.%zu @ 0x%" PFMT64x, i, (ut64)main_arena->binmap[i]);
273  }
274  { /* maybe use SDB instead of flags for this? */
275  char units[8];
276  rz_num_units(units, sizeof(units), main_arena->max_system_mem);
277  rz_cons_printf("f heap.maxmem @ %s\n", units);
278 
279  rz_num_units(units, sizeof(units), main_arena->system_mem);
280  rz_cons_printf("f heap.sysmem @ %s\n", units);
281 
282  rz_num_units(units, sizeof(units), main_arena->next_free);
283  rz_cons_printf("f heap.nextfree @ %s\n", units);
284 
285  rz_num_units(units, sizeof(units), main_arena->next);
286  rz_cons_printf("f heap.next @ %s\n", units);
287  }
288  return;
289  }
290 
291  PRINT_GA("malloc_state @ ");
292  PRINTF_BA("0x%" PFMT64x "\n\n", (ut64)m_arena);
293  PRINT_GA("struct malloc_state main_arena {\n");
294  PRINT_GA(" mutex = ");
295  PRINTF_BA("0x%08x\n", (ut32)main_arena->mutex);
296  PRINT_GA(" flags = ");
297  PRINTF_BA("0x%08x\n", (ut32)main_arena->flags);
298  PRINT_GA(" fastbinsY = {\n");
299 
300  for (i = 0, j = 1, k = SZ * 4; i < NFASTBINS; i++, j++, k += SZ * 2) {
301  if (FASTBIN_IDX_TO_SIZE(j) <= global_max_fast) {
302  PRINTF_YA(" Fastbin %02zu\n", j);
303  } else {
304  PRINTF_RA(" Fastbin %02zu\n", j);
305  }
306  PRINT_GA(" chunksize:");
307  PRINTF_BA(" == %04zu ", k);
308  PRINTF_GA("0x%" PFMT64x, (ut64)main_arena->fastbinsY[i]);
309  PRINT_GA(",\n");
310  }
311  PRINT_GA("}\n");
312  PRINT_GA(" top = ");
313  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->top);
314  PRINT_GA(",\n");
315  PRINT_GA(" last_remainder = ");
316  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->last_remainder);
317  PRINT_GA(",\n");
318  PRINT_GA(" bins {\n");
319 
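// apart[] caches, for each large bin, the smallest chunk size that maps to it:
// walk candidate sizes and record the size at which largebin_index() first
// crosses into the next bin index.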
320  /* Index & size for largebins */
321  start = SZ * 128;
322  for (i = start, k = 0, j = 0; j < NBINS - 2 && i < 1024 * 1024; i += 64) {
323  j = largebin_index(i);
324  if (j == k + NSMALLBINS + 1) {
325  apart[k++] = i;
326  }
327  }
328  for (i = 0, j = 1, k = SZ * 4; i < NBINS * 2 - 2; i += 2, j++) {
329  PRINTF_YA(" Bin %03zu: ", j);
330  if (j == 1) {
331  PRINT_GA("Unsorted Bin");
332  PRINT_GA(" [");
333  PRINT_GA(" chunksize:");
334  PRINT_BA(" undefined ");
335  } else if (j > 1 && j <= NSMALLBINS) {
336  if (j == 2) {
337  PRINT_GA(" ┌");
338  } else if (j == (NSMALLBINS / 2)) {
339  PRINT_GA(" Small Bins │");
340  } else if (j != 2 && j != (NSMALLBINS / 2) && j != NSMALLBINS) {
341  PRINT_GA(" │");
342  } else {
343  PRINT_GA(" └");
344  }
345  PRINT_GA(" chunksize:");
346  PRINTF_BA(" == %06zu ", k);
347  if (j < NSMALLBINS) {
348  k += SZ * 2;
349  }
350  } else {
351  if (j == NSMALLBINS + 1) {
352  PRINT_GA(" ┌");
353  } else if (j == (NSMALLBINS / 2) * 3) {
354  PRINT_GA(" Large Bins │");
355  } else if (j != NSMALLBINS + 1 && j != (NSMALLBINS / 2) * 3 && j != NBINS - 1) {
356  PRINT_GA(" │");
357  } else {
358  PRINT_GA(" └");
359  }
360  PRINT_GA(" chunksize:");
361  if (j != NBINS - 1) {
362  PRINTF_BA(" >= %06" PFMT64d " ", (ut64)apart[j - NSMALLBINS - 1]);
363  } else {
364  PRINT_BA(" remaining ");
365  }
366  }
367  GHT bin = m_arena + align + SZ * i - SZ * 2;
368  PRINTF_GA("0x%" PFMT64x "->fd = ", (ut64)bin);
369  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->bins[i]);
370  PRINT_GA(", ");
371  PRINTF_GA("0x%" PFMT64x "->bk = ", (ut64)bin);
372  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->bins[i + 1]);
373  PRINT_GA(", ");
374  rz_cons_newline();
375  }
376 
377  PRINT_GA(" }\n");
378  PRINT_GA(" binmap = {");
379 
380  for (i = 0; i < BINMAPSIZE; i++) {
381  if (i) {
382  PRINT_GA(",");
383  }
384  PRINTF_BA("0x%x", (ut32)main_arena->binmap[i]);
385  }
386  PRINT_GA("}\n");
387  PRINT_GA(" next = ");
388  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->next);
389  PRINT_GA(",\n");
390  PRINT_GA(" next_free = ");
391  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->next_free);
392  PRINT_GA(",\n");
393  PRINT_GA(" system_mem = ");
394  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->system_mem);
395  PRINT_GA(",\n");
396  PRINT_GA(" max_system_mem = ");
397  PRINTF_BA("0x%" PFMT64x, (ut64)main_arena->max_system_mem);
398  PRINT_GA(",\n");
399  PRINT_GA("}\n\n");
400 }
401 
408 RZ_API bool GH(rz_heap_resolve_main_arena)(RzCore *core, GHT *m_arena) {
409  rz_return_val_if_fail(core && core->dbg && core->dbg->maps, false);
410 
411  GHT brk_start = GHT_MAX, brk_end = GHT_MAX;
412  GHT libc_addr_sta = GHT_MAX, libc_addr_end = 0;
413  GHT addr_srch = GHT_MAX, heap_sz = GHT_MAX;
414  GHT main_arena_sym = GHT_MAX;
415  bool is_debugged = rz_config_get_b(core->config, "cfg.debug");
416  bool first_libc = true;
417  rz_config_set_i(core->config, "dbg.glibc.tcache", GH(is_tcache)(core));
418 
419  if (is_debugged) {
420  RzListIter *iter;
421  RzDebugMap *map;
422  rz_debug_map_sync(core->dbg);
423  rz_list_foreach (core->dbg->maps, iter, map) {
424  /* Try to find the main arena address using the glibc's symbols. */
425  if (strstr(map->name, "/libc-") && first_libc && main_arena_sym == GHT_MAX) {
426  first_libc = false;
427  main_arena_sym = GH(get_main_arena_with_symbol)(core, map);
428  }
429  if (strstr(map->name, "/libc-") && map->perm == RZ_PERM_RW) {
430  libc_addr_sta = map->addr;
431  libc_addr_end = map->addr_end;
432  break;
433  }
434  }
435  } else {
436  void **it;
437  RzPVector *maps = rz_io_maps(core->io);
438  rz_pvector_foreach (maps, it) {
439  RzIOMap *map = *it;
440  if (map->name && strstr(map->name, "arena")) {
441  libc_addr_sta = map->itv.addr;
442  libc_addr_end = map->itv.addr + map->itv.size;
443  break;
444  }
445  }
446  }
447 
448  if (libc_addr_sta == GHT_MAX || libc_addr_end == GHT_MAX) {
449  if (rz_config_get_b(core->config, "cfg.debug")) {
450  eprintf("Warning: Can't find glibc mapped in memory (see dm)\n");
451  } else {
452  eprintf("Warning: Can't find arena mapped in memory (see om)\n");
453  }
454  return false;
455  }
456 
457  GH(get_brks)
458  (core, &brk_start, &brk_end);
459  if (brk_start == GHT_MAX || brk_end == GHT_MAX) {
460  eprintf("No Heap section\n");
461  return false;
462  }
463 
464  addr_srch = libc_addr_sta;
465  heap_sz = brk_end - brk_start;
466  MallocState *ta = RZ_NEW0(MallocState);
467  if (!ta) {
468  return false;
469  }
470 
471  if (main_arena_sym != GHT_MAX) {
472  GH(rz_heap_update_main_arena)
473  (core, main_arena_sym, ta);
474  *m_arena = main_arena_sym;
475  core->dbg->main_arena_resolved = true;
476  free(ta);
477  return true;
478  }
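// Fallback: no symbol available, so scan the writable libc mapping one word at
// a time for a plausible malloc_state: its top chunk must lie inside [heap] and
// its system_mem must match the size of the [heap] mapping.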
479  while (addr_srch < libc_addr_end) {
480  GH(rz_heap_update_main_arena)
481  (core, addr_srch, ta);
482  if (ta->top > brk_start && ta->top < brk_end &&
483  ta->system_mem == heap_sz) {
484 
485  *m_arena = addr_srch;
486  free(ta);
487  if (is_debugged) {
488  core->dbg->main_arena_resolved = true;
489  }
490  return true;
491  }
492  addr_srch += sizeof(GHT);
493  }
494  eprintf("Warning: Can't find main_arena in mapped memory\n");
495  free(ta);
496  return false;
497 }
498 
500  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
502 
503  if (!cnk) {
504  return;
505  }
506 
507  (void)rz_io_read_at(core->io, chunk, (ut8 *)cnk, sizeof(*cnk));
508 
509  PRINT_GA("struct malloc_chunk @ ");
510  PRINTF_BA("0x%" PFMT64x, (ut64)chunk);
511  PRINT_GA(" {\n prev_size = ");
512  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->prev_size);
513  PRINT_GA(",\n size = ");
514  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->size & ~(NON_MAIN_ARENA | IS_MMAPPED | PREV_INUSE));
515  PRINT_GA(",\n flags: |N:");
516  PRINTF_BA("%1" PFMT64u, (ut64)(cnk->size & NON_MAIN_ARENA) >> 2);
517  PRINT_GA(" |M:");
518  PRINTF_BA("%1" PFMT64u, (ut64)(cnk->size & IS_MMAPPED) >> 1);
519  PRINT_GA(" |P:");
520  PRINTF_BA("%1" PFMT64u, (ut64)cnk->size & PREV_INUSE);
521 
522  PRINT_GA(",\n fd = ");
523  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->fd);
524 
525  PRINT_GA(",\n bk = ");
526  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->bk);
527 
528  if (cnk->size > SZ * 128) {
529  PRINT_GA(",\n fd-nextsize = ");
530  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->fd_nextsize);
531  PRINT_GA(",\n bk-nextsize = ");
532  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->bk_nextsize);
533  }
534 
535  PRINT_GA(",\n}\n");
536  GHT size = ((cnk->size >> 3) << 3) - SZ * 2;
537  if (size > SZ * 128) {
538  PRINT_GA("chunk too big to be displayed\n");
539  size = SZ * 128;
540  }
541 
542  char *data = calloc(1, size);
543  if (data) {
544  rz_io_read_at(core->io, chunk + SZ * 2, (ut8 *)data, size);
545  PRINT_GA("chunk data = \n");
546  rz_core_print_hexdump(core, chunk + SZ * 2, (ut8 *)data, size, SZ * 8, SZ, 1);
547  free(data);
548  }
549  free(cnk);
550 }
551 
558 RZ_API GH(RzHeapChunk) * GH(rz_heap_get_chunk_at_addr)(RzCore *core, GHT addr) {
559  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
560  if (!cnk) {
561  return NULL;
562  }
563  (void)rz_io_nread_at(core->io, addr, (ut8 *)cnk, sizeof(*cnk));
564  return cnk;
565 }
566 
572 void GH(print_heap_chunk_simple)(RzCore *core, GHT chunk, const char *status, PJ *pj) {
573  GH(RzHeapChunk) *cnk = GH(rz_heap_get_chunk_at_addr)(core, chunk);
574  if (!cnk) {
575  return;
576  }
578  if (pj == NULL) {
579  PRINT_GA("Chunk");
580  rz_cons_printf("(");
581  if (status) {
582  rz_cons_printf("status=");
583  if (!strcmp(status, "free")) {
584  PRINTF_GA("%s", status);
585  rz_cons_printf("%-6s", ",");
586  } else {
587  rz_cons_printf("%s,", status);
588  }
589  rz_cons_printf(" ");
590  }
591  rz_cons_printf("addr=");
592  PRINTF_YA("0x%" PFMT64x, (ut64)chunk);
593  rz_cons_printf(", size=");
594  PRINTF_BA("0x%" PFMT64x, (ut64)cnk->size & ~(NON_MAIN_ARENA | IS_MMAPPED | PREV_INUSE));
595  rz_cons_printf(", flags=");
596  bool print_comma = false;
597  if (cnk->size & NON_MAIN_ARENA) {
598  PRINT_RA("NON_MAIN_ARENA");
599  print_comma = true;
600  }
601  if (cnk->size & IS_MMAPPED) {
602  if (print_comma) {
603  PRINT_RA(",");
604  }
605  PRINT_RA("IS_MMAPPED");
606  print_comma = true;
607  }
608  if (cnk->size & PREV_INUSE) {
609  if (print_comma) {
610  PRINT_RA(",");
611  }
612  PRINT_RA("PREV_INUSE");
613  }
614  rz_cons_printf(")");
615  } else {
616  pj_o(pj);
617  pj_kn(pj, "prev_size", cnk->prev_size);
618  pj_kn(pj, "addr", chunk);
619  pj_kn(pj, "size", (ut64)cnk->size & ~(NON_MAIN_ARENA | IS_MMAPPED | PREV_INUSE));
620  pj_kn(pj, "non_main_arena", cnk->size & NON_MAIN_ARENA);
621  pj_kn(pj, "mmapped", cnk->size & IS_MMAPPED);
622  pj_kn(pj, "prev_inuse", cnk->size & PREV_INUSE);
623  pj_kn(pj, "fd", cnk->fd);
624  pj_kn(pj, "bk", cnk->bk);
625  pj_end(pj);
626  }
627  free(cnk);
628 }
629 
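// Returns true if m_state is a known arena: either the main arena itself or an
// arena reachable by following the circular malloc_state->next list.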
630 static bool GH(is_arena)(RzCore *core, GHT m_arena, GHT m_state) {
631  if (m_arena == m_state) {
632  return true;
633  }
634  MallocState *ta = RZ_NEW0(MallocState);
635  if (!ta) {
636  return false;
637  }
638  if (!GH(rz_heap_update_main_arena)(core, m_arena, ta)) {
639  free(ta);
640  return false;
641  }
642  if (ta->next == m_state) {
643  free(ta);
644  return true;
645  }
646  while (ta->next != GHT_MAX && ta->next != m_arena) {
647  if (!GH(rz_heap_update_main_arena)(core, ta->next, ta)) {
648  free(ta);
649  return false;
650  }
651  if (ta->next == m_state) {
652  free(ta);
653  return true;
654  }
655  }
656  free(ta);
657  return false;
658 }
659 
660 static int GH(print_double_linked_list_bin_simple)(RzCore *core, GHT bin, MallocState *main_arena, GHT brk_start) {
661  GHT next = GHT_MAX;
662  int ret = 1;
663  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
665 
666  if (!cnk) {
667  return -1;
668  }
669 
670  rz_io_read_at(core->io, bin, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
671 
672  PRINTF_GA(" 0x%" PFMT64x, (ut64)bin);
673  if (cnk->fd != bin) {
674  ret = 0;
675  }
676  while (cnk->fd != bin) {
677  PRINTF_BA("->fd = 0x%" PFMT64x, (ut64)cnk->fd);
678  next = cnk->fd;
679  if (next < brk_start || next > main_arena->top) {
680  PRINT_RA("Double linked list corrupted\n");
681  free(cnk);
682  return -1;
683  }
684  rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
685  }
686 
687  PRINTF_GA("->fd = 0x%" PFMT64x, (ut64)cnk->fd);
688  next = cnk->fd;
689 
690  if (next != bin) {
691  PRINT_RA("Double linked list corrupted\n");
692  free(cnk);
693  return -1;
694  }
695  (void)rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
696  PRINTF_GA("\n 0x%" PFMT64x, (ut64)bin);
697 
698  while (cnk->bk != bin) {
699  PRINTF_BA("->bk = 0x%" PFMT64x, (ut64)cnk->bk);
700  next = cnk->bk;
701  if (next < brk_start || next > main_arena->top) {
702  PRINT_RA("Double linked list corrupted.\n");
703  free(cnk);
704  return -1;
705  }
706  (void)rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
707  }
708 
709  PRINTF_GA("->bk = 0x%" PFMT64x, (ut64)cnk->bk);
710  free(cnk);
711  return ret;
712 }
713 
714 static int GH(print_double_linked_list_bin_graph)(RzCore *core, GHT bin, MallocState *main_arena, GHT brk_start) {
716  GHT next = GHT_MAX;
717  char title[256], chunk[256];
718  RzANode *bin_node = NULL, *prev_node = NULL, *next_node = NULL;
719  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
720  RzAGraph *g = rz_agraph_new(rz_cons_canvas_new(1, 1));
721 
722  if (!cnk || !g) {
723  free(cnk);
724  rz_agraph_free(g);
725  return -1;
726  }
727  g->can->color = rz_config_get_i(core->config, "scr.color");
728 
729  (void)rz_io_read_at(core->io, bin, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
730  snprintf(title, sizeof(title) - 1, "bin @ 0x%" PFMT64x "\n", (ut64)bin);
731  snprintf(chunk, sizeof(chunk) - 1, "fd: 0x%" PFMT64x "\nbk: 0x%" PFMT64x "\n",
732  (ut64)cnk->fd, (ut64)cnk->bk);
733  bin_node = rz_agraph_add_node(g, title, chunk);
734  prev_node = bin_node;
735 
736  while (cnk->bk != bin) {
737  next = cnk->bk;
738  if (next < brk_start || next > main_arena->top) {
739  PRINT_RA("Double linked list corrupted\n");
740  free(cnk);
741  free(g);
742  return -1;
743  }
744 
745  rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
746  snprintf(title, sizeof(title) - 1, "Chunk @ 0x%" PFMT64x "\n", (ut64)next);
747  snprintf(chunk, sizeof(chunk) - 1, "fd: 0x%" PFMT64x "\nbk: 0x%" PFMT64x "\n",
748  (ut64)cnk->fd, (ut64)cnk->bk);
749  next_node = rz_agraph_add_node(g, title, chunk);
750  rz_agraph_add_edge(g, prev_node, next_node);
751  rz_agraph_add_edge(g, next_node, prev_node);
752  prev_node = next_node;
753  }
754 
755  rz_agraph_add_edge(g, prev_node, bin_node);
756  rz_agraph_add_edge(g, bin_node, prev_node);
757  rz_agraph_print(g);
758 
759  free(cnk);
760  rz_agraph_free(g);
761  return 0;
762 }
763 
764 static int GH(print_double_linked_list_bin)(RzCore *core, MallocState *main_arena, GHT m_arena, GHT offset, GHT num_bin, int graph) {
765  if (!core || !core->dbg || !core->dbg->maps) {
766  return -1;
767  }
768  int ret = 0;
769  GHT brk_start = GHT_MAX, brk_end = GHT_MAX, initial_brk = GHT_MAX;
771 
772  if (num_bin > 126) {
773  return -1;
774  }
775  GHT bin = main_arena->bins[num_bin];
776 
777  if (!bin) {
778  return -1;
779  }
780 
781  GH(get_brks)
782  (core, &brk_start, &brk_end);
783  if (brk_start == GHT_MAX || brk_end == GHT_MAX) {
784  eprintf("No Heap section\n");
785  return -1;
786  }
787 
788  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
789  if (tcache) {
790  const int fc_offset = rz_config_get_i(core->config, "dbg.glibc.fc_offset");
791  bin = m_arena + offset + SZ * num_bin * 2 + 10 * SZ;
792  initial_brk = ((brk_start >> 12) << 12) + fc_offset;
793  } else {
794  bin = m_arena + offset + SZ * num_bin * 2 - SZ * 2;
795  initial_brk = (brk_start >> 12) << 12;
796  }
797 
798  if (num_bin == 0) {
799  PRINT_GA(" double linked list unsorted bin {\n");
800  } else if (num_bin >= 1 && num_bin <= NSMALLBINS - 1) {
801  PRINT_GA(" double linked list small bin {\n");
802  } else if (num_bin >= NSMALLBINS && num_bin <= NBINS - 2) {
803  PRINT_GA(" double linked list large bin {\n");
804  }
805 
806  if (!graph || graph == 1) {
807  ret = GH(print_double_linked_list_bin_simple)(core, bin, main_arena, initial_brk);
808  } else {
809  ret = GH(print_double_linked_list_bin_graph)(core, bin, main_arena, initial_brk);
810  }
811  PRINT_GA("\n }\n");
812  return ret;
813 }
814 
815 static void GH(print_heap_bin)(RzCore *core, GHT m_arena, MallocState *main_arena, const char *input) {
816  int i, j = 2;
817  GHT num_bin = GHT_MAX;
818  GHT offset;
820 
821  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
822  if (tcache) {
823  offset = 16;
824  } else {
825  offset = 12 * SZ + sizeof(int) * 2;
826  }
827 
828  switch (input[0]) {
829  case '\0': // dmhb
830  PRINT_YA("Bins {\n");
831  for (i = 0; i < NBINS - 1; i++) {
832  PRINTF_YA(" Bin %03d:\n", i);
833  GH(print_double_linked_list_bin)
834  (core, main_arena, m_arena, offset, i, 0);
835  }
836  PRINT_YA("\n}\n");
837  break;
838  case ' ': // dmhb [bin_num]
839  j--; // for spaces after input
840  // fallthrough
841  case 'g': // dmhbg [bin_num]
842  num_bin = rz_num_get(NULL, input + j);
843  if (num_bin > NBINS - 2) {
844  eprintf("Error: 0 <= bin <= %d\n", NBINS - 2);
845  break;
846  }
847  PRINTF_YA(" Bin %03" PFMT64u ":\n", (ut64)num_bin);
848  GH(print_double_linked_list_bin)
849  (core, main_arena, m_arena, offset, num_bin, j);
850  break;
851  }
852 }
853 
855  if (!item) {
856  return;
857  }
858  free(item->status);
859  free(item);
860 }
861 
862 RZ_API RzHeapBin *GH(rz_heap_fastbin_content)(RzCore *core, MallocState *main_arena, int bin_num) {
863  if (!core || !core->dbg || !core->dbg->maps) {
864  return NULL;
865  }
866  GHT brk_start = GHT_MAX, brk_end = GHT_MAX;
867  RzHeapBin *heap_bin = RZ_NEW0(RzHeapBin);
868  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
869  if (!cnk || !heap_bin) {
870  free(heap_bin);
871  free(cnk);
872  return NULL;
873  }
875  heap_bin->bin_num = bin_num + 1;
876  heap_bin->size = FASTBIN_IDX_TO_SIZE(bin_num + 1);
877  heap_bin->type = rz_str_new("Fast");
878  GHT next = main_arena->fastbinsY[bin_num];
879  if (!next) {
880  free(cnk);
881  return heap_bin;
882  }
883  GH(get_brks)
884  (core, &brk_start, &brk_end);
885  heap_bin->fd = next;
886  if (brk_start == GHT_MAX || brk_end == GHT_MAX) {
887  free(cnk);
888  return heap_bin;
889  }
890  GHT size = main_arena->top - brk_start;
891 
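// Walk the fastbin free list. next_root/next_tmp run ahead of the main cursor
// to detect a cycle (i.e. a double free) before it is reached; every fd pointer
// is demangled with get_next_pointer() to account for safe-linking.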
892  GHT next_root = next, next_tmp = next, double_free = GHT_MAX;
893  while (next && next >= brk_start && next < main_arena->top) {
894  RzHeapChunkListItem *item = RZ_NEW0(RzHeapChunkListItem);
895  if (!item) {
896  break;
897  }
898  item->addr = next;
899  item->status = rz_str_new("free");
900  rz_list_append(heap_bin->chunks, item);
901  while (double_free == GHT_MAX && next_tmp && next_tmp >= brk_start && next_tmp <= main_arena->top) {
902  rz_io_read_at(core->io, next_tmp, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
903  next_tmp = GH(get_next_pointer)(core, next_tmp, cnk->fd);
904  if (cnk->prev_size > size || ((cnk->size >> 3) << 3) > size) {
905  break;
906  }
907  if (next_root == next_tmp) {
908  double_free = next_root;
909  break;
910  }
911  }
912  rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
913  next = GH(get_next_pointer)(core, next, cnk->fd);
914  if (cnk->prev_size > size || ((cnk->size >> 3) << 3) > size) {
915  char message[50];
916  rz_snprintf(message, 50, "Linked list corrupted @ 0x%" PFMT64x, (ut64)next);
917  heap_bin->message = rz_str_new(message);
918  free(cnk);
919  return heap_bin;
920  }
921 
922  next_root = next_tmp = next;
923  if (double_free == next) {
924  char message[50];
925  rz_snprintf(message, 50, "Double free detected @ 0x%" PFMT64x, (ut64)next);
926  heap_bin->message = rz_str_new(message);
927  free(cnk);
928  return heap_bin;
929  }
930  }
931  if (next && (next < brk_start || next >= main_arena->top)) {
932  char message[50];
933  rz_snprintf(message, 50, "Linked list corrupted @ 0x%" PFMT64x, (ut64)next);
934  heap_bin->message = rz_str_new(message);
935  free(cnk);
936  return heap_bin;
937  }
938  free(cnk);
939  return heap_bin;
940 }
941 
942 void GH(print_heap_fastbin)(RzCore *core, GHT m_arena, MallocState *main_arena, GHT global_max_fast, const char *input, bool main_arena_only, PJ *pj) {
944  int fastbins_max = rz_config_get_i(core->config, "dbg.glibc.fastbinmax") - 1;
945  int global_max_fast_idx = fastbin_index(global_max_fast);
946  int fastbin_count = fastbins_max < global_max_fast_idx ? fastbins_max : global_max_fast_idx;
947  int bin_to_print = 0;
948  switch (input[0]) {
949  case ' ':
950  bin_to_print = (int)rz_num_get(NULL, input);
951  if (bin_to_print <= 0 || bin_to_print - 1 > fastbin_count) {
952  eprintf("Error: 0 < bin <= %d\n", fastbin_count + 1);
953  return;
954  }
955  }
956  if (!pj) {
957  rz_cons_printf("Fast bins in Arena @ ");
958  PRINTF_YA("0x%" PFMT64x, (ut64)m_arena);
959  rz_cons_newline();
960  }
961  for (int i = 0; i <= fastbin_count; i++) {
962  if (bin_to_print && i != bin_to_print - 1) {
963  continue;
964  }
965  RzHeapBin *bin = GH(rz_heap_fastbin_content)(core, main_arena, i);
966  if (!bin) {
967  continue;
968  }
969  if (!pj) {
970  rz_cons_printf("Fast_bin[");
971  PRINTF_BA("%02zu", (size_t)bin->bin_num);
972  rz_cons_printf("] [size: ");
973  PRINTF_BA("0x%" PFMT64x, bin->size);
974  rz_cons_printf("]");
975  } else {
976  pj_o(pj);
977  pj_ks(pj, "bin_type", "fast");
978  pj_kn(pj, "bin_num", bin->bin_num);
979  pj_ka(pj, "chunks");
980  }
981  if (!bin->chunks || !rz_list_length(bin->chunks)) {
982  if (!pj) {
983  PRINT_RA(" Empty bin\n");
984  }
985  } else {
986  RzListIter *iter;
987  RzHeapChunkListItem *pos;
988  rz_cons_newline();
989  rz_list_foreach (bin->chunks, iter, pos) {
990  if (!pj) {
991  rz_cons_printf(" -> ");
992  }
993  GH(print_heap_chunk_simple)
994  (core, pos->addr, NULL, pj);
995  if (!pj) {
996  rz_cons_newline();
997  }
998  }
999  if (bin->message && !pj) {
1000  PRINTF_RA("%s\n", bin->message);
1001  }
1002  }
1003  if (pj) {
1004  pj_end(pj);
1005  pj_end(pj);
1006  }
1007  GH(rz_heap_bin_free)
1008  (bin);
1009  }
1010 }
1011 
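// tcache_perthread_struct changed in glibc 2.30 (per-bin counts grew from
// unsigned char to uint16_t), so RTcache wraps either layout and the helpers
// below dispatch on tcache->type (NEW vs OLD).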
1012 static GH(RTcache) * GH(tcache_new)(RzCore *core) {
1013  rz_return_val_if_fail(core, NULL);
1014  GH(RTcache) *tcache = RZ_NEW0(GH(RTcache));
1015  if (!tcache) {
1016  return NULL;
1017  }
1018  if (core->dbg->glibc_version >= TCACHE_NEW_VERSION) {
1019  tcache->type = NEW;
1020  tcache->RzHeapTcache.heap_tcache = RZ_NEW0(GH(RzHeapTcache));
1021  } else {
1022  tcache->type = OLD;
1023  tcache->RzHeapTcache.heap_tcache_pre_230 = RZ_NEW0(GH(RzHeapTcachePre230));
1024  }
1025  return tcache;
1026 }
1027 
1028 RZ_API void GH(tcache_free)(GH(RTcache) * tcache) {
1029  rz_return_if_fail(tcache);
1030  tcache->type == NEW
1031  ? free(tcache->RzHeapTcache.heap_tcache)
1032  : free(tcache->RzHeapTcache.heap_tcache_pre_230);
1033  free(tcache);
1034 }
1035 
1036 static bool GH(tcache_read)(RzCore *core, GHT tcache_start, GH(RTcache) * tcache) {
1037  rz_return_val_if_fail(core && tcache, false);
1038  return tcache->type == NEW
1039  ? rz_io_read_at(core->io, tcache_start, (ut8 *)tcache->RzHeapTcache.heap_tcache, sizeof(GH(RzHeapTcache)))
1040  : rz_io_read_at(core->io, tcache_start, (ut8 *)tcache->RzHeapTcache.heap_tcache_pre_230, sizeof(GH(RzHeapTcachePre230)));
1041 }
1042 
1043 static int GH(tcache_get_count)(GH(RTcache) * tcache, int index) {
1044  rz_return_val_if_fail(tcache, 0);
1045  return tcache->type == NEW
1046  ? tcache->RzHeapTcache.heap_tcache->counts[index]
1047  : tcache->RzHeapTcache.heap_tcache_pre_230->counts[index];
1048 }
1049 
1050 static GHT GH(tcache_get_entry)(GH(RTcache) * tcache, int index) {
1051  rz_return_val_if_fail(tcache, 0);
1052  return tcache->type == NEW
1053  ? tcache->RzHeapTcache.heap_tcache->entries[index]
1054  : tcache->RzHeapTcache.heap_tcache_pre_230->entries[index];
1055 }
1056 
1063 RZ_API RzList *GH(rz_heap_tcache_content)(RzCore *core, GHT arena_base) {
1064  // check if tcache is even present in this glibc version
1065  const int tc = rz_config_get_i(core->config, "dbg.glibc.tcache");
1066  if (!tc) {
1067  rz_cons_printf("No tcache present in this version of libc\n");
1068  return NULL;
1069  }
1070 
1071  // get main arena base address to compare
1072  GHT m_arena;
1073  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
1074  return NULL;
1075  }
1076 
1077  GHT brk_start = GHT_MAX, brk_end = GHT_MAX, initial_brk = GHT_MAX;
1078  GH(get_brks)
1079  (core, &brk_start, &brk_end);
1080  GHT fc_offset = GH(tcache_chunk_size)(core, brk_start);
1081  initial_brk = brk_start + fc_offset;
1082  if (brk_start == GHT_MAX || brk_end == GHT_MAX || initial_brk == GHT_MAX) {
1083  // no heap section exists in this case
1084  return NULL;
1085  }
1086 
1087  // get the base address of tcache
1088  GHT tcache_start;
1089  if (arena_base == m_arena) {
1090  // get tcache base for main arena
1091  // tcache is consistently the first allocation in the main arena.
1092  tcache_start = brk_start + 0x10;
1093  } else {
1094  // get tcache base for thread arena
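// Thread arenas live inside an mmap'ed sub-heap whose base holds a heap_info
// header, followed by the malloc_state and then the per-thread tcache. The
// code below approximates the sub-heap base by clearing the low 16 bits of the
// arena address, then skips over those two structures (plus alignment).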
1095  GHT mmap_start = ((arena_base >> 16) << 16);
1096  tcache_start = mmap_start + sizeof(GH(RzHeapInfo)) + sizeof(GH(RzHeap_MallocState_tcache)) + GH(MMAP_ALIGN);
1097 
1098  // for thread arena check if the arena has threads attached or not
1099  MallocState *arena = RZ_NEW0(MallocState);
1100  if (!arena) {
1101  return NULL;
1102  }
1103  if (!GH(rz_heap_update_main_arena)(core, arena_base, arena) || !arena->attached_threads) {
1104  free(arena);
1105  return NULL;
1106  }
1107  free(arena);
1108  }
1109  // Get rz_tcache struct
1110  GH(RTcache) *tcache = GH(tcache_new)(core);
1111  if (!GH(tcache_read)(core, tcache_start, tcache)) {
1112  GH(tcache_free)
1113  (tcache);
1114  return NULL;
1115  }
1116 
1117  // List of heap bins to return
1118  RzList *tcache_bins_list = rz_list_newf((RzListFree)GH(rz_heap_bin_free));
1119 
1120  // Use rz_tcache struct to get bins
1121  for (int i = 0; i < TCACHE_MAX_BINS; i++) {
1122  int count = GH(tcache_get_count)(tcache, i);
1123  GHT entry = GH(tcache_get_entry)(tcache, i);
1124 
1125  RzHeapBin *bin = RZ_NEW0(RzHeapBin);
1126  if (!bin) {
1127  goto error;
1128  }
1129  bin->type = rz_str_new("Tcache");
1130  bin->bin_num = i;
1131  bin->chunks = rz_list_newf(free);
1132  rz_list_append(tcache_bins_list, bin);
1133  if (count <= 0) {
1134  continue;
1135  }
1136  bin->fd = (ut64)(entry - GH(HDR_SZ));
1137  // get first chunk
1138  RzHeapChunkListItem *chunk = RZ_NEW0(RzHeapChunkListItem);
1139  if (!chunk) {
1140  goto error;
1141  }
1142  chunk->addr = (ut64)(entry - GH(HDR_SZ));
1143  rz_list_append(bin->chunks, chunk);
1144 
1145  if (count <= 1) {
1146  continue;
1147  }
1148 
1149  // get rest of the chunks
1150  GHT tcache_fd = entry;
1151  GHT tcache_tmp = GHT_MAX;
1152  for (size_t n = 1; n < count; n++) {
1153  bool r = rz_io_nread_at(core->io, tcache_fd, (ut8 *)&tcache_tmp, sizeof(GHT));
1154  if (!r) {
1155  goto error;
1156  }
1157  tcache_tmp = GH(get_next_pointer)(core, tcache_fd, tcache_tmp);
1158  chunk = RZ_NEW0(RzHeapChunkListItem);
1159  if (!chunk) {
1160  goto error;
1161  }
1162  // the base address of the chunk = address - 2 * PTR_SIZE
1163  chunk->addr = (ut64)(tcache_tmp - GH(HDR_SZ));
1164  rz_list_append(bin->chunks, chunk);
1165  tcache_fd = tcache_tmp;
1166  }
1167  }
1168  free(tcache);
1169  return tcache_bins_list;
1170 
1171 error:
1172  rz_list_free(tcache_bins_list);
1173  free(tcache);
1174  return NULL;
1175 }
1176 
1177 static void GH(print_tcache_content)(RzCore *core, GHT arena_base, GHT main_arena_base, PJ *pj) {
1179 
1180  RzList *bins = GH(rz_heap_tcache_content)(core, arena_base);
1181  if (!bins) {
1182  return;
1183  }
1184  if (!pj) {
1185  if (main_arena_base == arena_base) {
1186  rz_cons_printf("Tcache bins in Main Arena @ ");
1187  } else {
1188  rz_cons_printf("Tcache bins in Thread Arena @ ");
1189  }
1190  PRINTF_YA("0x%" PFMT64x "\n", (ut64)arena_base);
1191  }
1192  RzHeapBin *bin;
1193  RzListIter *iter;
1194  rz_list_foreach (bins, iter, bin) {
1195  if (!bin) {
1196  continue;
1197  }
1198  RzList *chunks = bin->chunks;
1199  if (rz_list_length(chunks) == 0) {
1200  continue;
1201  }
1202  if (!pj) {
1203  rz_cons_printf("%s", bin->type);
1204  rz_cons_printf("_bin[");
1205  PRINTF_BA("%02zu", (size_t)bin->bin_num);
1206  rz_cons_printf("]: Items:");
1207  PRINTF_BA("%2d", rz_list_length(bin->chunks));
1208  rz_cons_newline();
1209  } else {
1210  pj_o(pj);
1211  pj_ks(pj, "bin_type", "tcache");
1212  pj_kn(pj, "bin_num", bin->bin_num);
1213  pj_ka(pj, "chunks");
1214  }
1215  RzHeapChunkListItem *pos;
1216  RzListIter *iter2;
1217  rz_list_foreach (chunks, iter2, pos) {
1218  if (!pj) {
1219  rz_cons_printf(" -> ");
1220  }
1221  GH(print_heap_chunk_simple)
1222  (core, pos->addr, NULL, pj);
1223  if (!pj) {
1224  rz_cons_newline();
1225  }
1226  }
1227  if (bin->message) {
1228  PRINTF_RA("%s\n", bin->message);
1229  }
1230  if (pj) {
1231  pj_end(pj);
1232  pj_end(pj);
1233  }
1234  }
1235  rz_list_free(bins);
1236 }
1237 
1238 void GH(print_malloc_states)(RzCore *core, GHT m_arena, MallocState *main_arena, bool json) {
1239  MallocState *ta = RZ_NEW0(MallocState);
1241 
1242  if (!ta) {
1243  return;
1244  }
1245  PJ *pj = NULL;
1246  if (!json) {
1247  rz_cons_printf("Main arena (addr=");
1248  PRINTF_YA("0x%" PFMT64x, (ut64)m_arena);
1249  rz_cons_printf(", lastRemainder=");
1250  PRINTF_YA("0x%" PFMT64x, (ut64)main_arena->last_remainder);
1251  rz_cons_printf(", top=");
1252  PRINTF_YA("0x%" PFMT64x, (ut64)main_arena->top);
1253  rz_cons_printf(", next=");
1254  PRINTF_YA("0x%" PFMT64x, (ut64)main_arena->next);
1255  rz_cons_printf(")\n");
1256  } else {
1257  pj = pj_new();
1258  if (!pj) {
1259  free(ta);
1260  return;
1261  }
1262  pj_o(pj);
1263  pj_ka(pj, "arenas");
1264  pj_o(pj);
1265  pj_kn(pj, "addr", m_arena);
1266  pj_kn(pj, "last_rem", main_arena->last_remainder);
1267  pj_kn(pj, "top", main_arena->top);
1268  pj_kn(pj, "next", main_arena->next);
1269  pj_ks(pj, "type", "main");
1270  pj_ks(pj, "state", "used");
1271  pj_end(pj);
1272  }
1273  if (main_arena->next != m_arena) {
1274  ta->next = main_arena->next;
1275  while (GH(is_arena)(core, m_arena, ta->next) && ta->next != m_arena) {
1276  ut64 ta_addr = ta->next;
1277  if (!GH(rz_heap_update_main_arena)(core, ta->next, ta)) {
1278  goto end;
1279  }
1280  if (!json) {
1281  rz_cons_printf("Thread arena(addr=");
1282  PRINTF_YA("0x%" PFMT64x, ta_addr);
1283  rz_cons_printf(", lastRemainder=");
1284  PRINTF_YA("0x%" PFMT64x, (ut64)ta->last_remainder);
1285  rz_cons_printf(", top=");
1286  PRINTF_YA("0x%" PFMT64x, (ut64)ta->top);
1287  rz_cons_printf(", next=");
1288  PRINTF_YA("0x%" PFMT64x, (ut64)ta->next);
1289  if (ta->attached_threads) {
1290  rz_cons_printf(")\n");
1291  } else {
1292  rz_cons_printf(" free)\n");
1293  }
1294  } else {
1295  pj_o(pj);
1296  pj_kn(pj, "addr", (ut64)ta_addr);
1297  pj_kn(pj, "last_rem", ta->last_remainder);
1298  pj_kn(pj, "top", ta->top);
1299  pj_kn(pj, "next", ta->next);
1300  pj_ks(pj, "type", "thread");
1301  if (ta->attached_threads) {
1302  pj_ks(pj, "state", "used");
1303  } else {
1304  pj_ks(pj, "state", "free");
1305  }
1306  pj_end(pj);
1307  }
1308  }
1309  }
1310 end:
1311  if (json) {
1312  pj_end(pj);
1313  pj_end(pj);
1314  rz_cons_print(pj_string(pj));
1315  pj_free(pj);
1316  }
1317  free(ta);
1318 }
1319 
1320 void GH(print_inst_minfo)(GH(RzHeapInfo) * heap_info, GHT hinfo) {
1322 
1323  PRINT_YA("malloc_info @ ");
1324  PRINTF_BA("0x%" PFMT64x, (ut64)hinfo);
1325  PRINT_YA(" {\n ar_ptr = ");
1326  PRINTF_BA("0x%" PFMT64x "\n", (ut64)heap_info->ar_ptr);
1327  PRINT_YA(" prev = ");
1328  PRINTF_BA("0x%" PFMT64x "\n", (ut64)heap_info->prev);
1329  PRINT_YA(" size = ");
1330  PRINTF_BA("0x%" PFMT64x "\n", (ut64)heap_info->size);
1331  PRINT_YA(" mprotect_size = ");
1332  PRINTF_BA("0x%" PFMT64x "\n", (ut64)heap_info->mprotect_size);
1333  PRINT_YA("}\n\n");
1334 }
1335 
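// For a thread arena, the heap_info header sits at the masked (low 16 bits
// cleared) base of the arena's mmap'ed sub-heap; the code follows the chain of
// heap_info structures via their prev pointers and prints each one.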
1336 void GH(print_malloc_info)(RzCore *core, GHT m_state, GHT malloc_state) {
1337  GHT h_info;
1339  if (malloc_state == m_state) {
1340  PRINT_RA("main_arena does not have an instance of malloc_info\n");
1341  } else if (GH(is_arena)(core, malloc_state, m_state)) {
1342 
1343  h_info = (malloc_state >> 16) << 16;
1344  GH(RzHeapInfo) *heap_info = RZ_NEW0(GH(RzHeapInfo));
1345  if (!heap_info) {
1346  return;
1347  }
1348  rz_io_read_at(core->io, h_info, (ut8 *)heap_info, sizeof(GH(RzHeapInfo)));
1349  GH(print_inst_minfo)
1350  (heap_info, h_info);
1351  MallocState *ms = RZ_NEW0(MallocState);
1352  if (!ms) {
1353  free(heap_info);
1354  return;
1355  }
1356 
1357  while (heap_info->prev != 0x0 && heap_info->prev != GHT_MAX) {
1358  if (!GH(rz_heap_update_main_arena)(core, malloc_state, ms)) {
1359  free(ms);
1360  free(heap_info);
1361  return;
1362  }
1363  if ((ms->top >> 16) << 16 != h_info) {
1364  h_info = (ms->top >> 16) << 16;
1365  rz_io_read_at(core->io, h_info, (ut8 *)heap_info, sizeof(GH(RzHeapInfo)));
1366  GH(print_inst_minfo)
1367  (heap_info, h_info);
1368  }
1369  }
1370  free(heap_info);
1371  free(ms);
1372  } else {
1373  PRINT_RA("This address is not part of the arenas\n");
1374  }
1375 }
1376 
1377 char *GH(rz_bin_num_to_type)(int bin_num) {
1378  if (bin_num == 0) {
1379  return rz_str_new("Unsorted");
1380  } else if (bin_num >= 1 && bin_num <= NSMALLBINS - 1) {
1381  return rz_str_new("Small");
1382  } else if (bin_num >= NSMALLBINS && bin_num <= NBINS - 2) {
1383  return rz_str_new("Large");
1384  }
1385  return NULL;
1386 }
1387 
1388 RZ_API void GH(rz_heap_bin_free)(RzHeapBin *bin) {
1389  if (!bin) {
1390  return;
1391  }
1392  free(bin->type);
1393  free(bin->message);
1394  rz_list_free(bin->chunks);
1395  free(bin);
1396 }
1404 RZ_API RzHeapBin *GH(rz_heap_bin_content)(RzCore *core, MallocState *main_arena, int bin_num, GHT m_arena) {
1405  int idx = 2 * bin_num;
1406  ut64 fw = main_arena->bins[idx];
1407  ut64 bk = main_arena->bins[idx + 1];
1408  RzHeapBin *bin = RZ_NEW0(RzHeapBin);
1409  if (!bin) {
1410  return NULL;
1411  }
1412  bin->fd = fw;
1413  bin->bk = bk;
1414  bin->bin_num = bin_num;
1415  bin->type = GH(rz_bin_num_to_type)(bin_num);
1416 
1417  // small bins hold chunks of a fixed size
1418  if (!strcmp(bin->type, "Small")) {
1419  bin->size = 4 * SZ + (bin_num - 1) * 2 * SZ;
1420  }
1421 
1422  bin->chunks = rz_list_newf(free);
1423  GH(RzHeapChunk) *head = RZ_NEW0(GH(RzHeapChunk));
1424  if (!head) {
1425  GH(rz_heap_bin_free)
1426  (bin);
1427  return NULL;
1428  }
1429 
1430  (void)rz_io_read_at(core->io, bk, (ut8 *)head, sizeof(GH(RzHeapChunk)));
1431 
1432  if (head->fd == fw) {
1433  return bin;
1434  }
1435  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
1436  if (!cnk) {
1437  GH(rz_heap_bin_free)
1438  (bin);
1439  return NULL;
1440  }
1441  GHT brk_start = GHT_MAX, brk_end = GHT_MAX, initial_brk = GHT_MAX;
1442  GH(get_brks)
1443  (core, &brk_start, &brk_end);
1444  if (brk_start == GHT_MAX || brk_end == GHT_MAX) {
1445  free(cnk);
1446  return bin;
1447  }
1448  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
1449  int offset;
1450  GHT base;
1451  if (tcache) {
1452  offset = 16;
1453  const int fc_offset = rz_config_get_i(core->config, "dbg.glibc.fc_offset");
1454  base = m_arena + offset + SZ * bin_num * 2 + 10 * SZ;
1455  initial_brk = ((brk_start >> 12) << 12) + fc_offset;
1456  } else {
1457  offset = 12 * SZ + sizeof(int) * 2;
1458  base = m_arena + offset + SZ * bin_num * 2 - SZ * 2;
1459  initial_brk = (brk_start >> 12) << 12;
1460  }
1461  bin->addr = base;
1462  while (fw != head->fd) {
1463  if (fw > main_arena->top || fw < initial_brk) {
1464  bin->message = rz_str_new("Corrupted list");
1465  break;
1466  }
1467  rz_io_read_at(core->io, fw, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
1468  RzHeapChunkListItem *chunk = RZ_NEW0(RzHeapChunkListItem);
1469  if (!chunk) {
1470  break;
1471  }
1472  chunk->addr = fw;
1473  rz_list_append(bin->chunks, chunk);
1474  fw = cnk->fd;
1475  }
1476  free(cnk);
1477  free(head);
1478  return bin;
1479 }
1487 static int GH(print_bin_content)(RzCore *core, MallocState *main_arena, int bin_num, PJ *pj, GHT m_arena) {
1488  RzListIter *iter;
1489  RzHeapChunkListItem *pos;
1490  RzHeapBin *bin = GH(rz_heap_bin_content)(core, main_arena, bin_num, m_arena);
1491  RzList *chunks = bin->chunks;
1492  if (rz_list_length(chunks) == 0) {
1493  GH(rz_heap_bin_free)
1494  (bin);
1495  return 0;
1496  }
1497  int chunks_cnt = 0;
1499  if (!pj) {
1500  rz_cons_printf("%s", bin->type);
1501  rz_cons_printf("_bin[");
1502  PRINTF_BA("%d", bin->bin_num);
1503  rz_cons_printf("]: fd=");
1504  PRINTF_YA("0x%" PFMT64x, bin->fd);
1505  rz_cons_printf(", bk=");
1506  PRINTF_YA("0x%" PFMT64x, bin->bk);
1507  rz_cons_printf(", base=");
1508  PRINTF_YA("0x%" PFMT64x, bin->addr);
1509  if (!strcmp(bin->type, "Small")) {
1510  rz_cons_printf(", size=");
1511  PRINTF_BA("0x%" PFMT64x, bin->size);
1512  }
1513  rz_cons_newline();
1514  } else {
1515  pj_kn(pj, "fd", bin->fd);
1516  pj_kn(pj, "bk", bin->bk);
1517  pj_kn(pj, "base", bin->addr);
1518  pj_ka(pj, "chunks");
1519  }
1520  rz_list_foreach (chunks, iter, pos) {
1521  if (!pj) {
1522  rz_cons_printf(" -> ");
1523  }
1524  GH(print_heap_chunk_simple)
1525  (core, pos->addr, NULL, pj);
1526  if (!pj) {
1527  rz_cons_newline();
1528  }
1529  chunks_cnt += 1;
1530  }
1531  if (bin->message) {
1532  PRINTF_RA("%s\n", bin->message);
1533  }
1534  GH(rz_heap_bin_free)
1535  (bin);
1536  if (pj) {
1537  pj_end(pj);
1538  }
1539  return chunks_cnt;
1540 }
1541 
1548 static void GH(print_unsortedbin_description)(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj) {
1550  if (!pj) {
1551  rz_cons_printf("Unsorted bin in Arena @ ");
1552  PRINTF_YA("0x%" PFMT64x "\n", (ut64)m_arena);
1553  }
1554  if (pj) {
1555  pj_o(pj);
1556  pj_kn(pj, "bin_num", 0);
1557  pj_ks(pj, "bin_type", "unsorted");
1558  }
1559  int chunk_cnt = GH(print_bin_content)(core, main_arena, 0, pj, m_arena);
1560  if (!pj) {
1561  rz_cons_printf("Found %d chunks in unsorted bin\n", chunk_cnt);
1562  } else {
1563  pj_end(pj);
1564  }
1565 }
1566 
1573 static void GH(print_smallbin_description)(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj) {
1575  if (!pj) {
1576  rz_cons_printf("Small bins in Arena @ ");
1577  PRINTF_YA("0x%" PFMT64x "\n", (ut64)m_arena);
1578  }
1579  int chunk_cnt = 0;
1580  int non_empty_cnt = 0;
1581  for (int bin_num = 1; bin_num < NSMALLBINS; bin_num++) {
1582  if (pj) {
1583  pj_o(pj);
1584  pj_kn(pj, "bin_num", bin_num);
1585  pj_ks(pj, "bin_type", "small");
1586  }
1587  int chunk_found = GH(print_bin_content)(core, main_arena, bin_num, pj, m_arena);
1588  if (pj) {
1589  pj_end(pj);
1590  }
1591  if (chunk_found > 0) {
1592  non_empty_cnt += 1;
1593  }
1594  chunk_cnt += chunk_found;
1595  }
1596  if (!pj) {
1597  rz_cons_printf("Found %d chunks in %d small bins\n", chunk_cnt, non_empty_cnt);
1598  }
1599 }
1600 
1607 static void GH(print_largebin_description)(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj) {
1609  if (!pj) {
1610  rz_cons_printf("Large bins in Arena @ ");
1611  PRINTF_YA("0x%" PFMT64x "\n", (ut64)m_arena);
1612  }
1613  int chunk_cnt = 0;
1614  int non_empty_cnt = 0;
1615  for (int bin_num = NSMALLBINS; bin_num < NBINS - 2; bin_num++) {
1616  if (pj) {
1617  pj_o(pj);
1618  pj_kn(pj, "bin_num", bin_num);
1619  pj_ks(pj, "bin_type", "large");
1620  }
1621  int chunk_found = GH(print_bin_content)(core, main_arena, bin_num, pj, m_arena);
1622  if (pj) {
1623  pj_end(pj);
1624  }
1625  if (chunk_found > 0) {
1626  non_empty_cnt += 1;
1627  }
1628  chunk_cnt += chunk_found;
1629  }
1630  if (!pj) {
1631  rz_cons_printf("Found %d chunks in %d large bins\n", chunk_cnt, non_empty_cnt);
1632  }
1633 }
1634 
1643 static void GH(print_main_arena_bins)(RzCore *core, GHT m_arena, MallocState *main_arena, GHT main_arena_base, GHT global_max_fast, RzHeapBinType format, bool json) {
1644  rz_return_if_fail(core && core->dbg && core->dbg->maps);
1645  PJ *pj = NULL;
1646  if (json) {
1647  pj = pj_new();
1648  if (!pj) {
1649  return;
1650  }
1651  pj_o(pj);
1652  pj_ka(pj, "bins");
1653  }
1654  if (format == RZ_HEAP_BIN_ANY || format == RZ_HEAP_BIN_TCACHE) {
1655  GH(print_tcache_content)
1656  (core, m_arena, main_arena_base, pj);
1657  rz_cons_newline();
1658  }
1659  if (format == RZ_HEAP_BIN_ANY || format == RZ_HEAP_BIN_FAST) {
1660  char *input = rz_str_newlen("", 1);
1661  bool main_arena_only = true;
1662  GH(print_heap_fastbin)
1663  (core, m_arena, main_arena, global_max_fast, input, main_arena_only, pj);
1664  free(input);
1665  rz_cons_newline();
1666  }
1667  if (format == RZ_HEAP_BIN_ANY || format == RZ_HEAP_BIN_UNSORTED) {
1668  GH(print_unsortedbin_description)
1669  (core, m_arena, main_arena, pj);
1670  rz_cons_newline();
1671  }
1672  if (format == RZ_HEAP_BIN_ANY || format == RZ_HEAP_BIN_SMALL) {
1673  GH(print_smallbin_description)
1674  (core, m_arena, main_arena, pj);
1675  rz_cons_newline();
1676  }
1677  if (format == RZ_HEAP_BIN_ANY || format == RZ_HEAP_BIN_LARGE) {
1678  GH(print_largebin_description)
1679  (core, m_arena, main_arena, pj);
1680  rz_cons_newline();
1681  }
1682  if (json) {
1683  pj_end(pj);
1684  pj_end(pj);
1685  rz_cons_print(pj_string(pj));
1686  pj_free(pj);
1687  }
1688 }
1689 
1691  free(item->arena);
1692  free(item->type);
1693  free(item);
1694 }
1702 RZ_API RzList *GH(rz_heap_arenas_list)(RzCore *core, GHT m_arena, MallocState *main_arena) {
1704  MallocState *ta = RZ_NEW0(MallocState);
1705  if (!ta) {
1706  return arena_list;
1707  }
1708  // main arena
1709  if (!GH(rz_heap_update_main_arena)(core, m_arena, ta)) {
1710  free(ta);
1711  return arena_list;
1712  }
1713  RzArenaListItem *item = RZ_NEW0(RzArenaListItem);
1714  if (!item) {
1715  free(ta);
1716  return arena_list;
1717  }
1718  item->addr = m_arena;
1719  item->type = rz_str_new("Main");
1720  item->arena = ta;
1721  rz_list_append(arena_list, item);
1722  if (main_arena->next != m_arena) {
1723  ta->next = main_arena->next;
1724  while (GH(is_arena)(core, m_arena, ta->next) && ta->next != m_arena) {
1725  ut64 ta_addr = ta->next;
1726  ta = RZ_NEW0(MallocState);
1727  if (!GH(rz_heap_update_main_arena)(core, ta_addr, ta)) {
1728  free(ta);
1729  return arena_list;
1730  }
1731  // thread arenas
1732  item = RZ_NEW0(RzArenaListItem);
1733  if (!item) {
1734  free(ta);
1735  break;
1736  }
1737  item->addr = ta_addr;
1738  item->type = rz_str_new("Thread");
1739  item->arena = ta;
1740  rz_list_append(arena_list, item);
1741  }
1742  }
1743  return arena_list;
1744 }
1745 
1755 RZ_API RzList *GH(rz_heap_chunks_list)(RzCore *core, MallocState *main_arena,
1756  GHT m_arena, GHT m_state, bool top_chunk) {
1757  RzList *chunks = rz_list_newf(free);
1758  if (!core || !core->dbg || !core->dbg->maps) {
1759  return chunks;
1760  }
1761  GHT global_max_fast = (64 * SZ / 4);
1762  GHT brk_start = GHT_MAX, brk_end = GHT_MAX, size_tmp, min_size = SZ * 4;
1763  GHT tcache_fd = GHT_MAX, tcache_tmp = GHT_MAX;
1764  GHT initial_brk = GHT_MAX, tcache_initial_brk = GHT_MAX;
1765 
1766  const int tcache = rz_config_get_i(core->config, "dbg.glibc.tcache");
1767  const int offset = rz_config_get_i(core->config, "dbg.glibc.fc_offset");
1769  int glibc_version = core->dbg->glibc_version;
1770 
1771  if (m_arena == m_state) {
1772  GH(get_brks)
1773  (core, &brk_start, &brk_end);
1774  if (tcache) {
1775  initial_brk = ((brk_start >> 12) << 12) + GH(HDR_SZ);
1776  if (rz_config_get_b(core->config, "cfg.debug")) {
1777  tcache_initial_brk = initial_brk;
1778  }
1779  initial_brk += (glibc_version < 230)
1780  ? sizeof(GH(RzHeapTcachePre230))
1781  : sizeof(GH(RzHeapTcache));
1782  } else {
1783  initial_brk = (brk_start >> 12) << 12;
1784  }
1785  } else {
1786  brk_start = ((m_state >> 16) << 16);
1787  brk_end = brk_start + main_arena->system_mem;
1788  if (tcache) {
1789  tcache_initial_brk = brk_start + sizeof(GH(RzHeapInfo)) + sizeof(GH(RzHeap_MallocState_tcache)) + GH(MMAP_ALIGN);
1790  initial_brk = tcache_initial_brk + offset;
1791  } else {
1792  initial_brk = brk_start + sizeof(GH(RzHeapInfo)) + sizeof(GH(RzHeap_MallocState)) + MMAP_OFFSET;
1793  }
1794  }
1795 
1796  if (brk_start == GHT_MAX || brk_end == GHT_MAX || initial_brk == GHT_MAX) {
1797  eprintf("No Heap section\n");
1798  return chunks;
1799  }
1800 
1801  GHT next_chunk = initial_brk, prev_chunk = next_chunk;
1802  GH(RzHeapChunk) *cnk = RZ_NEW0(GH(RzHeapChunk));
1803  if (!cnk) {
1804  return chunks;
1805  }
1806  GH(RzHeapChunk) *cnk_next = RZ_NEW0(GH(RzHeapChunk));
1807  if (!cnk_next) {
1808  free(cnk);
1809  return chunks;
1810  }
1811 
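// Walk the heap chunk by chunk: start at initial_brk (past the tcache struct in
// the main arena, or past heap_info + malloc_state in a thread arena) and
// advance by each chunk's size field until main_arena->top is reached. A chunk
// is reported "free" if it is found in a fastbin or tcache bin, or if the
// following chunk has PREV_INUSE cleared.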
1812  (void)rz_io_read_at(core->io, next_chunk, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
1813  size_tmp = (cnk->size >> 3) << 3;
1814  ut64 prev_chunk_addr;
1815  ut64 prev_chunk_size;
1816  while (next_chunk && next_chunk >= brk_start && next_chunk < main_arena->top) {
1817  if (size_tmp < min_size || next_chunk + size_tmp > main_arena->top) {
1818  RzHeapChunkListItem *block = RZ_NEW0(RzHeapChunkListItem);
1819  if (!block) {
1820  break;
1821  }
1822  block->addr = next_chunk;
1823  block->status = rz_str_new("corrupted");
1824  block->size = size_tmp;
1825  rz_list_append(chunks, block);
1826  break;
1827  }
1828 
1829  prev_chunk_addr = (ut64)prev_chunk;
1830  prev_chunk_size = (((ut64)cnk->size) >> 3) << 3;
1831  bool fastbin = size_tmp >= SZ * 4 && size_tmp <= global_max_fast;
1832  bool is_free = false, double_free = false;
1833 
1834  if (fastbin) {
1835  int i = (size_tmp / (SZ * 2)) - 2;
1836  GHT idx = (GHT)main_arena->fastbinsY[i];
1837  (void)rz_io_read_at(core->io, idx, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
1838  GHT next = GH(get_next_pointer)(core, idx, cnk->fd);
1839  if (prev_chunk == idx && idx && !next) {
1840  is_free = true;
1841  }
1842  while (next && next >= brk_start && next < main_arena->top) {
1843  if (prev_chunk == idx || prev_chunk == next || idx == next) {
1844  is_free = true;
1845  if (idx == next) {
1846  double_free = true;
1847  break;
1848  }
1849  (void)rz_io_read_at(core->io, next, (ut8 *)cnk_next, sizeof(GH(RzHeapChunk)));
1850  GHT next_node = GH(get_next_pointer)(core, next, cnk_next->fd);
1851  // avoid triple while?
1852  while (next_node && next_node >= brk_start && next_node < main_arena->top) {
1853  if (prev_chunk == next_node) {
1854  double_free = true;
1855  break;
1856  }
1857  (void)rz_io_read_at(core->io, next_node, (ut8 *)cnk_next, sizeof(GH(RzHeapChunk)));
1858  next_node = GH(get_next_pointer)(core, next_node, cnk_next->fd);
1859  }
1860  if (double_free) {
1861  break;
1862  }
1863  }
1864  (void)rz_io_read_at(core->io, next, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
1865  next = GH(get_next_pointer)(core, next, cnk->fd);
1866  }
1867  if (double_free) {
1868  PRINT_RA(" Double free in singly-linked list detected ");
1869  break;
1870  }
1871  prev_chunk_size = ((i + 1) * GH(HDR_SZ)) + GH(HDR_SZ);
1872  }
1873 
1874  if (tcache) {
1875  GH(RTcache) *tcache_heap = GH(tcache_new)(core);
1876  if (!tcache_heap) {
1877  free(cnk);
1878  free(cnk_next);
1879  return chunks;
1880  }
1881  GH(tcache_read)
1882  (core, tcache_initial_brk, tcache_heap);
1883  size_t i;
1884  for (i = 0; i < TCACHE_MAX_BINS; i++) {
1885  int count = GH(tcache_get_count)(tcache_heap, i);
1886  GHT entry = GH(tcache_get_entry)(tcache_heap, i);
1887  if (count > 0) {
1888  if (entry - SZ * 2 == prev_chunk) {
1889  is_free = true;
1890  prev_chunk_size = ((i + 1) * TC_HDR_SZ + GH(TC_SZ));
1891  break;
1892  }
1893  if (count > 1) {
1894  tcache_fd = entry;
1895  int n;
1896  for (n = 1; n < count; n++) {
1897  bool r = rz_io_read_at(core->io, tcache_fd, (ut8 *)&tcache_tmp, sizeof(GHT));
1898  if (!r) {
1899  break;
1900  }
1901  tcache_tmp = GH(get_next_pointer)(core, tcache_fd, read_le(&tcache_tmp));
1902  if (tcache_tmp - SZ * 2 == prev_chunk) {
1903  is_free = true;
1904  prev_chunk_size = ((i + 1) * TC_HDR_SZ + GH(TC_SZ));
1905  break;
1906  }
1907  tcache_fd = (ut64)tcache_tmp;
1908  }
1909  }
1910  }
1911  }
1912  GH(tcache_free)
1913  (tcache_heap);
1914  }
1915 
1916  next_chunk += size_tmp;
1917  prev_chunk = next_chunk;
1918  rz_io_read_at(core->io, next_chunk, (ut8 *)cnk, sizeof(GH(RzHeapChunk)));
1919  size_tmp = (cnk->size >> 3) << 3;
1920  RzHeapChunkListItem *block = RZ_NEW0(RzHeapChunkListItem);
1921  if (!block) {
1922  break;
1923  }
1924  char *status = rz_str_new("allocated");
1925  if (fastbin) {
1926  if (is_free) {
1927  strcpy(status, "free");
1928  }
1929  }
1930  if (!(cnk->size & 1)) {
1931  strcpy(status, "free");
1932  }
1933  if (tcache) {
1934  if (is_free) {
1935  strcpy(status, "free");
1936  }
1937  }
1938  block->addr = prev_chunk_addr;
1939  block->status = status;
1940  block->size = prev_chunk_size;
1941  rz_list_append(chunks, block);
1942  }
1943  if (top_chunk) {
1944  RzHeapChunkListItem *block = RZ_NEW0(RzHeapChunkListItem);
1945  if (block) {
1946  block->addr = main_arena->top;
1947  block->status = rz_str_new("free (top)");
1948  RzHeapChunkSimple *chunkSimple = GH(rz_heap_chunk_wrapper)(core, main_arena->top);
1949  if (chunkSimple) {
1950  block->size = chunkSimple->size;
1951  free(chunkSimple);
1952  }
1953  rz_list_append(chunks, block);
1954  }
1955  }
1956  free(cnk);
1957  free(cnk_next);
1958  return chunks;
1959 }
1960 
1961 RZ_IPI RzCmdStatus GH(rz_cmd_arena_print_handler)(RzCore *core, int argc, const char **argv) {
1962  GHT m_arena = GHT_MAX;
1964  MallocState *main_arena = RZ_NEW0(MallocState);
1965  if (!main_arena) {
1966  return RZ_CMD_STATUS_ERROR;
1967  }
1968  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
1969  free(main_arena);
1970  return RZ_CMD_STATUS_ERROR;
1971  }
1972  if (!GH(rz_heap_update_main_arena)(core, m_arena, main_arena)) {
1973  free(main_arena);
1974  return RZ_CMD_STATUS_ERROR;
1975  }
1976  RzList *arenas_list = GH(rz_heap_arenas_list)(core, m_arena, main_arena);
1977  RzListIter *iter;
1978  RzArenaListItem *pos;
1979  bool flag = false;
1980  rz_list_foreach (arenas_list, iter, pos) {
1981  MallocState *arena = pos->arena;
1982  if (!flag) {
1983  flag = true;
1984  rz_cons_printf("Main arena (addr=");
1985  } else {
1986  rz_cons_printf("Thread arena(addr=");
1987  }
1988  PRINTF_YA("0x%" PFMT64x, (ut64)pos->addr);
1989  rz_cons_printf(", lastRemainder=");
1990  PRINTF_YA("0x%" PFMT64x, (ut64)arena->last_remainder);
1991  rz_cons_printf(", top=");
1992  PRINTF_YA("0x%" PFMT64x, (ut64)arena->top);
1993  rz_cons_printf(", next=");
1994  PRINTF_YA("0x%" PFMT64x, (ut64)arena->next);
1995  if (arena->attached_threads) {
1996  rz_cons_printf(")\n");
1997  } else {
1998  rz_cons_printf(", free)\n");
1999  }
2000  }
2001  rz_list_free(arenas_list);
2002  free(main_arena);
2003  return RZ_CMD_STATUS_OK;
2004 }
2005 
2006 RZ_IPI RzCmdStatus GH(rz_cmd_heap_chunks_print_handler)(RzCore *core, int argc, const char **argv, RzCmdStateOutput *state) {
2007  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2008  RzConsPrintablePalette *pal = rz_cons_singleton()->context->pal;
2009  MallocState *main_arena = RZ_NEW0(MallocState);
2010  RzOutputMode mode = state->mode;
2011  if (!main_arena) {
2012  return RZ_CMD_STATUS_ERROR;
2013  }
2014  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2015  free(main_arena);
2016  return RZ_CMD_STATUS_ERROR;
2017  }
2018  if (argc == 1) {
2019  m_state = m_arena;
2020  } else if (argc == 2) {
2021  m_state = rz_num_get(NULL, argv[1]);
2022  }
2023  if (!GH(is_arena)(core, m_arena, m_state)) {
2024  free(main_arena);
2025  PRINT_RA("This address is not a valid arena\n");
2026  return RZ_CMD_STATUS_ERROR;
2027  }
2028  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2029  free(main_arena);
2030  return RZ_CMD_STATUS_ERROR;
2031  }
2032  GHT brk_start, brk_end;
2033  if (m_arena == m_state) {
2034  GH(get_brks)
2035  (core, &brk_start, &brk_end);
2036 
2037  } else {
2038  brk_start = ((m_state >> 16) << 16);
2039  brk_end = brk_start + main_arena->system_mem;
2040  }
2041  RzListIter *iter;
2042  RzHeapChunkListItem *pos;
2043  PJ *pj = state->d.pj;
2044  int w, h;
2045  RzConfigHold *hc = rz_config_hold_new(core->config);
2046  if (!hc) {
2047  free(main_arena);
2048  return RZ_CMD_STATUS_ERROR;
2049  }
2050  w = rz_cons_get_size(&h);
2051  RzConsCanvas *can = rz_cons_canvas_new(w, h);
2052  if (!can) {
2053  free(main_arena);
2054  rz_config_hold_free(hc);
2055  return RZ_CMD_STATUS_ERROR;
2056  }
2057 
2058  RzAGraph *g = rz_agraph_new(can);
2059  if (!g) {
2060  free(main_arena);
2061  rz_cons_canvas_free(can);
2063  rz_config_hold_free(hc);
2064  return RZ_CMD_STATUS_ERROR;
2065  }
2066  RzANode *top = RZ_EMPTY, *chunk_node = RZ_EMPTY, *prev_node = RZ_EMPTY;
2067  char *top_title = NULL, *top_data = NULL, *node_title = NULL, *node_data = NULL;
2068  bool first_node = true;
2069  top_data = rz_str_new("");
2070  RzList *chunks = GH(rz_heap_chunks_list)(core, main_arena, m_arena, m_state, false);
2071  if (mode == RZ_OUTPUT_MODE_JSON) {
2072  if (!pj) {
2073  goto end;
2074  }
2075  pj_o(pj);
2076  pj_ka(pj, "chunks");
2077  } else if (mode == RZ_OUTPUT_MODE_STANDARD || mode == RZ_OUTPUT_MODE_LONG) {
2078  rz_cons_printf("Arena @ ");
2079  PRINTF_YA("0x%" PFMT64x, (ut64)m_state);
2080  rz_cons_newline();
2081  } else if (mode == RZ_OUTPUT_MODE_LONG_JSON) {
2082  can->linemode = rz_config_get_i(core->config, "graph.linemode");
2083  can->color = rz_config_get_i(core->config, "scr.color");
2084  core->cons->use_utf8 = rz_config_get_i(core->config, "scr.utf8");
2085  g->layout = rz_config_get_i(core->config, "graph.layout");
2086  rz_agraph_set_title(g, "Heap Layout");
2087  top_title = rz_str_newf("Top chunk @ 0x%" PFMT64x "\n", (ut64)main_arena->top);
2088  }
2089  rz_list_foreach (chunks, iter, pos) {
2090  if (mode == RZ_OUTPUT_MODE_STANDARD || mode == RZ_OUTPUT_MODE_LONG) {
2091  GH(print_heap_chunk_simple)
2092  (core, pos->addr, pos->status, NULL);
2093  rz_cons_newline();
2094  if (mode == RZ_OUTPUT_MODE_LONG) {
2095  int size = 0x10;
2096  char *data = calloc(1, size);
2097  if (data) {
2098  rz_io_nread_at(core->io, (ut64)(pos->addr + SZ * 2), (ut8 *)data, size);
2099  core->print->flags &= ~RZ_PRINT_FLAGS_HEADER;
2100  core->print->pairs = false;
2101  rz_cons_printf(" ");
2102  rz_core_print_hexdump(core, (ut64)(pos->addr + SZ * 2), (ut8 *)data, size, SZ * 2, 1, 1);
2103  core->print->flags |= RZ_PRINT_FLAGS_HEADER;
2104  core->print->pairs = true;
2105  free(data);
2106  }
2107  }
2108  } else if (mode == RZ_OUTPUT_MODE_JSON) {
2109  pj_o(pj);
2110  pj_kn(pj, "addr", pos->addr);
2111  pj_kn(pj, "size", pos->size);
2112  pj_ks(pj, "status", pos->status);
2113  pj_end(pj);
2114  } else if (mode == RZ_OUTPUT_MODE_RIZIN) {
2115  rz_cons_printf("fs heap.%s\n", pos->status);
2116  char *name = rz_str_newf("chunk.%06" PFMT64x, ((pos->addr >> 4) & 0xffffULL));
2117  rz_cons_printf("f %s %d @ 0x%" PFMT64x "\n", name, (int)pos->size, (ut64)pos->addr);
2118  free(name);
2119  } else if (mode == RZ_OUTPUT_MODE_LONG_JSON) { // graph
2120  free(node_title);
2121  free(node_data);
2122  node_title = rz_str_newf(" Malloc chunk @ 0x%" PFMT64x " ", (ut64)pos->addr);
2123  node_data = rz_str_newf("size: 0x%" PFMT64x " status: %s\n", (ut64)pos->size, pos->status);
2124  chunk_node = rz_agraph_add_node(g, node_title, node_data);
2125  if (first_node) {
2126  first_node = false;
2127  } else {
2128  rz_agraph_add_edge(g, prev_node, chunk_node);
2129  }
2130  prev_node = chunk_node;
2131  }
2132  }
2133  if (mode == RZ_OUTPUT_MODE_STANDARD || mode == RZ_OUTPUT_MODE_LONG) {
2134  GH(print_heap_chunk_simple)
2135  (core, main_arena->top, "free", NULL);
2136  PRINT_RA("[top]");
2137  rz_cons_printf("[brk_start: ");
2138  PRINTF_YA("0x%" PFMT64x, (ut64)brk_start);
2139  rz_cons_printf(", brk_end: ");
2140  PRINTF_YA("0x%" PFMT64x, (ut64)brk_end);
2141  rz_cons_printf("]");
2142  } else if (mode == RZ_OUTPUT_MODE_JSON) {
2143  pj_end(pj);
2144  pj_kn(pj, "top", main_arena->top);
2145  pj_kn(pj, "brk", brk_start);
2146  pj_kn(pj, "end", brk_end);
2147  pj_end(pj);
2148  } else if (mode == RZ_OUTPUT_MODE_RIZIN) {
2149  rz_cons_printf("fs-\n");
2150  rz_cons_printf("f heap.top @ 0x%08" PFMT64x "\n", (ut64)main_arena->top);
2151  rz_cons_printf("f heap.brk @ 0x%08" PFMT64x "\n", (ut64)brk_start);
2152  rz_cons_printf("f heap.end @ 0x%08" PFMT64x "\n", (ut64)brk_end);
2153  } else if (mode == RZ_OUTPUT_MODE_LONG_JSON) {
2154  top = rz_agraph_add_node(g, top_title, top_data);
2155  if (!first_node) {
2156  rz_agraph_add_edge(g, prev_node, top);
2157  free(node_data);
2158  free(node_title);
2159  }
2160  rz_agraph_print(g);
2161  }
2162 end:
2163  rz_cons_newline();
2164  free(g);
2165  free(top_data);
2166  free(top_title);
2167  rz_list_free(chunks);
2168  free(main_arena);
2169  rz_cons_canvas_free(can);
2171  rz_config_hold_free(hc);
2172  return RZ_CMD_STATUS_OK;
2173 }
2174 
2175 RZ_IPI RzCmdStatus GH(rz_cmd_main_arena_print_handler)(RzCore *core, int argc, const char **argv, RzOutputMode mode) {
2176  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2178  GHT global_max_fast = (64 * SZ / 4);
2179  MallocState *main_arena = RZ_NEW0(MallocState);
2180  if (!main_arena) {
2181  return RZ_CMD_STATUS_ERROR;
2182  }
2183  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2184  free(main_arena);
2185  return RZ_CMD_STATUS_ERROR;
2186  }
2187  if (argc == 1) {
2188  m_state = m_arena;
2189  } else if (argc == 2) {
2190  m_state = rz_num_get(NULL, argv[1]);
2191  }
2192  if (!GH(is_arena)(core, m_arena, m_state)) {
2193  PRINT_RA("This address is not a valid arena\n");
2194  free(main_arena);
2195  return RZ_CMD_STATUS_ERROR;
2196  }
2197  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2198  free(main_arena);
2199  return RZ_CMD_STATUS_ERROR;
2200  }
2201  GH(print_arena_stats)
2202  (core, m_state, main_arena, global_max_fast, mode);
2203  free(main_arena);
2204  return RZ_CMD_STATUS_OK;
2205 }
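
The global_max_fast default used above, 64 * SZ / 4, mirrors glibc's DEFAULT_MXFAST: on a 64-bit target (SZ == 8) it works out to 128 (0x80) bytes, on 32-bit to 64 (0x40). A trivial sketch of the 64-bit case, purely for illustration:

/* Default fastbin threshold, assuming a 64-bit target where SZ == 8 */
#define GLOBAL_MAX_FAST_64 (64 * 8 / 4) /* == 128 == 0x80 bytes */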
2206 
2207 RZ_IPI RzCmdStatus GH(rz_cmd_heap_chunk_print_handler)(RzCore *core, int argc, const char **argv) {
2208  GHT m_arena = GHT_MAX;
2209  MallocState *main_arena = RZ_NEW0(MallocState);
2210  if (!main_arena) {
2211  return RZ_CMD_STATUS_ERROR;
2212  }
2213  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2214  free(main_arena);
2215  return RZ_CMD_STATUS_ERROR;
2216  }
2217  ut64 addr = core->offset;
2218  GH(print_heap_chunk)
2219  (core, addr);
2220  free(main_arena);
2221  return RZ_CMD_STATUS_OK;
2222 }
2223 
2224 RZ_IPI RzCmdStatus GH(rz_cmd_heap_info_print_handler)(RzCore *core, int argc, const char **argv) {
2225  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2227  MallocState *main_arena = RZ_NEW0(MallocState);
2228  if (!main_arena) {
2229  return RZ_CMD_STATUS_ERROR;
2230  }
2231  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2232  free(main_arena);
2233  return RZ_CMD_STATUS_ERROR;
2234  }
2235  if (argc == 1) {
2236  m_state = m_arena;
2237  } else if (argc == 2) {
2238  m_state = rz_num_get(NULL, argv[1]);
2239  }
2240  if (!GH(is_arena)(core, m_arena, m_state)) {
2241  PRINT_RA("This address is not a valid arena\n");
2242  free(main_arena);
2243  return RZ_CMD_STATUS_ERROR;
2244  }
2245  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2246  free(main_arena);
2247  return RZ_CMD_STATUS_ERROR;
2248  }
2249  GH(print_malloc_info)
2250  (core, m_arena, m_state);
2251  free(main_arena);
2252  return RZ_CMD_STATUS_OK;
2253 }
2254 
2255 RZ_IPI RzCmdStatus GH(rz_cmd_heap_tcache_print_handler)(RzCore *core, int argc, const char **argv) {
2256  GHT m_arena = GHT_MAX;
2257  MallocState *main_arena = RZ_NEW0(MallocState);
2258  if (!main_arena) {
2259  return RZ_CMD_STATUS_ERROR;
2260  }
2261  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2262  free(main_arena);
2263  return RZ_CMD_STATUS_ERROR;
2264  }
2265  if (!GH(rz_heap_update_main_arena)(core, m_arena, main_arena)) {
2266  free(main_arena);
2267  return RZ_CMD_STATUS_ERROR;
2268  }
2269 
2270  // if no tcache in this version of glibc just return
2271  const int tc = rz_config_get_i(core->config, "dbg.glibc.tcache");
2272  if (!tc) {
2273  rz_cons_printf("No tcache present in this version of libc\n");
2274  free(main_arena);
2275  return RZ_CMD_STATUS_ERROR;
2276  }
2277 
2278  RzList *arenas_list = GH(rz_heap_arenas_list)(core, m_arena, main_arena);
2279  RzArenaListItem *item;
2280  RzListIter *iter;
2281  rz_list_foreach (arenas_list, iter, item) {
2282  GH(print_tcache_content)
2283  (core, item->addr, m_arena, NULL);
2284  }
2285  free(main_arena);
2286  return RZ_CMD_STATUS_OK;
2287 }
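
The handler above bails out when dbg.glibc.tcache is 0, which happens for pre-2.26 glibc or when version detection fails. A caller that knows the target libc better can force the flag before invoking it; a minimal sketch using the config API this file already references:

/* Force tcache support on when automatic libc detection gets it wrong;
 * dbg.glibc.tcache is the same config key read by the handler above. */
rz_config_set_i(core->config, "dbg.glibc.tcache", 1);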
2288 
2289 RZ_IPI int GH(rz_cmd_heap_bins_list_print)(RzCore *core, const char *input) {
2290  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2292  MallocState *main_arena = RZ_NEW0(MallocState);
2293  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2294  free(main_arena);
2295  return RZ_CMD_STATUS_ERROR;
2296  }
2297  char *m_state_str, *dup = strdup(input);
2298  if (*dup) {
2299  strtok(dup, ":");
2300  m_state_str = strtok(NULL, ":");
2301  m_state = rz_num_get(NULL, m_state_str);
2302  if (!m_state) {
2303  m_state = m_arena;
2304  }
2305  } else {
2306  if (core->offset != core->prompt_offset) {
2307  m_state = core->offset;
2308  } else {
2309  m_state = m_arena;
2310  }
2311  }
2312  if (GH(is_arena)(core, m_arena, m_state)) {
2313  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2314  free(main_arena);
2315  free(dup);
2316  return RZ_CMD_STATUS_ERROR;
2317  }
2318  GH(print_heap_bin)
2319  (core, m_state, main_arena, dup);
2320  } else {
2321  PRINT_RA("This address is not part of the arenas\n");
2322  free(main_arena);
2323  free(dup);
2324  return RZ_CMD_STATUS_ERROR;
2325  }
2326  free(dup);
2327  free(main_arena);
2328  return RZ_CMD_STATUS_OK;
2329 }
2330 
2331 RZ_IPI int GH(rz_cmd_heap_fastbins_print)(void *data, const char *input) {
2332  RzCore *core = (RzCore *)data;
2333  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2335  MallocState *main_arena = RZ_NEW0(MallocState);
2336  GHT global_max_fast = (64 * SZ / 4);
2337  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2338  free(main_arena);
2339  return RZ_CMD_STATUS_ERROR;
2340  }
2341  bool main_arena_only = false;
2342  char *m_state_str, *dup = strdup(input);
2343  if (*dup) {
2344  strtok(dup, ":");
2345  m_state_str = strtok(NULL, ":");
2346  m_state = rz_num_get(NULL, m_state_str);
2347  if (!m_state) {
2348  m_state = m_arena;
2349  }
2350  } else {
2351  if (core->offset != core->prompt_offset) {
2352  m_state = core->offset;
2353  } else {
2354  m_state = m_arena;
2355  }
2356  }
2357  if (GH(is_arena)(core, m_arena, m_state)) {
2358  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2359  free(dup);
2360  free(main_arena);
2361  return RZ_CMD_STATUS_ERROR;
2362  }
2363  GH(print_heap_fastbin)
2364  (core, m_state, main_arena, global_max_fast, dup, main_arena_only, NULL);
2365  } else {
2366  PRINT_RA("This address is not part of the arenas\n");
2367  free(dup);
2368  free(main_arena);
2369  return RZ_CMD_STATUS_ERROR;
2370  }
2371  free(dup);
2372  free(main_arena);
2373  return RZ_CMD_STATUS_OK;
2374 }
2375 
2376 RZ_IPI RzCmdStatus GH(rz_cmd_heap_arena_bins_print_handler)(RzCore *core, int argc, const char **argv, RzOutputMode mode) {
2377  GHT m_arena = GHT_MAX, m_state = GHT_MAX;
2379  GHT global_max_fast = (64 * SZ / 4);
2380  MallocState *main_arena = RZ_NEW0(MallocState);
2381  if (!main_arena) {
2382  return RZ_CMD_STATUS_ERROR;
2383  }
2384  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2385  free(main_arena);
2386  return RZ_CMD_STATUS_ERROR;
2387  }
2388  if (core->offset != core->prompt_offset) {
2389  m_state = core->offset;
2390  } else {
2391  m_state = m_arena;
2392  }
2393  if (!GH(is_arena)(core, m_arena, m_state)) {
2394  PRINT_RA("This address is not part of the arenas\n");
2395  free(main_arena);
2396  return RZ_CMD_STATUS_ERROR;
2397  }
2398  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2399  free(main_arena);
2400  return RZ_CMD_STATUS_ERROR;
2401  }
2402 
2403  bool json = false;
2404  if (mode == RZ_OUTPUT_MODE_JSON) { // dmhdj
2405  json = true;
2406  }
2407  RzHeapBinType bin_format = RZ_HEAP_BIN_ANY;
2408  if (argc == 2) {
2409  const char *input = argv[1];
2410  if (!strcmp(input, "tcache")) {
2411  bin_format = RZ_HEAP_BIN_TCACHE;
2412  } else if (!strcmp(input, "fast")) {
2413  bin_format = RZ_HEAP_BIN_FAST;
2414  } else if (!strcmp(input, "unsorted")) {
2415  bin_format = RZ_HEAP_BIN_UNSORTED;
2416  } else if (!strcmp(input, "small")) {
2417  bin_format = RZ_HEAP_BIN_SMALL;
2418  } else if (!strcmp(input, "large")) {
2419  bin_format = RZ_HEAP_BIN_LARGE;
2420  }
2421  }
2422  GH(print_main_arena_bins)
2423  (core, m_state, main_arena, m_arena, global_max_fast, bin_format, json);
2424  free(main_arena);
2425  return RZ_CMD_STATUS_OK;
2426 }
2427 
2433 RZ_API RzList *GH(rz_heap_arena_list_wrapper)(RzCore *core) {
2434  GHT m_arena;
2435  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2436  return rz_list_newf(free);
2437  }
2438  MallocState *main_arena = RZ_NEW0(MallocState);
2439  if (!main_arena) {
2440  return rz_list_newf(free);
2441  }
2442  if (!GH(rz_heap_update_main_arena)(core, m_arena, main_arena)) {
2443  free(main_arena);
2444  return rz_list_newf(free);
2445  }
2446  RzList *arenas_list = GH(rz_heap_arenas_list)(core, m_arena, main_arena);
2447  free(main_arena);
2448  return arenas_list;
2449 }
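
A minimal caller sketch for the wrapper above, assuming core is an already initialized RzCore and that GH() expands to the word-size specific symbol as elsewhere in this file; each RzArenaListItem carries the arena base address and its parsed MallocState:

RzList *arenas = GH(rz_heap_arena_list_wrapper)(core);
RzListIter *it;
RzArenaListItem *item;
rz_list_foreach (arenas, it, item) {
	rz_cons_printf("arena @ 0x%" PFMT64x ", top = 0x%" PFMT64x "\n",
		(ut64)item->addr, (ut64)item->arena->top);
}
rz_list_free(arenas);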
2450 
2457 RZ_API RzList *GH(rz_heap_chunks_list_wrapper)(RzCore *core, ut64 m_state) {
2458  GHT m_arena;
2459  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2460  return rz_list_newf(free);
2461  }
2462  if (!GH(is_arena)(core, m_arena, m_state)) {
2463  return rz_list_newf(free);
2464  }
2465  MallocState *main_arena = RZ_NEW0(MallocState);
2466  if (!main_arena) {
2467  return rz_list_newf(free);
2468  }
2469  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2470  free(main_arena);
2471  return rz_list_newf(free);
2472  }
2473  RzList *chunks = GH(rz_heap_chunks_list)(core, main_arena, m_arena, m_state, true);
2474  free(main_arena);
2475  return chunks;
2476 }
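
Similarly for the chunk-list wrapper above; m_state is assumed to be a valid arena base address, for instance an addr taken from the arena list in the previous sketch:

RzList *chunks = GH(rz_heap_chunks_list_wrapper)(core, m_state);
RzListIter *it;
RzHeapChunkListItem *chunk;
rz_list_foreach (chunks, it, chunk) {
	rz_cons_printf("0x%" PFMT64x " %s size=0x%" PFMT64x "\n",
		(ut64)chunk->addr, chunk->status, (ut64)chunk->size);
}
rz_list_free(chunks);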
2477 
2484 RZ_API RzHeapChunkSimple *GH(rz_heap_chunk_wrapper)(RzCore *core, GHT addr) {
2485  GH(RzHeapChunk) *heap_chunk = GH(rz_heap_get_chunk_at_addr)(core, addr);
2486  if (!heap_chunk) {
2487  return NULL;
2488  }
2489  RzHeapChunkSimple *simple_chunk = RZ_NEW0(RzHeapChunkSimple);
2490  if (!simple_chunk) {
2491  free(heap_chunk);
2492  return NULL;
2493  }
2494  ut64 size = heap_chunk->size;
2495  simple_chunk->addr = addr;
2496  simple_chunk->size = size & ~(NON_MAIN_ARENA | IS_MMAPPED | PREV_INUSE);
2497  simple_chunk->non_main_arena = (bool)((size & NON_MAIN_ARENA) >> 2);
2498  simple_chunk->is_mmapped = (bool)((size & IS_MMAPPED) >> 1);
2499  simple_chunk->prev_inuse = (bool)(size & PREV_INUSE);
2500  simple_chunk->prev_size = heap_chunk->prev_size;
2501  simple_chunk->bk = heap_chunk->bk;
2502  simple_chunk->fd = heap_chunk->fd;
2503  simple_chunk->fd_nextsize = heap_chunk->fd_nextsize;
2504  simple_chunk->bk_nextsize = heap_chunk->bk_nextsize;
2505  free(heap_chunk);
2506  return simple_chunk;
2507 }
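
A sketch of inspecting a single chunk through the wrapper above; addr is assumed to point at the start of a chunk header, and the returned struct is owned by the caller:

RzHeapChunkSimple *c = GH(rz_heap_chunk_wrapper)(core, addr);
if (c) {
	rz_cons_printf("chunk @ 0x%" PFMT64x ": size=0x%" PFMT64x " prev_inuse=%d fd=0x%" PFMT64x " bk=0x%" PFMT64x "\n",
		(ut64)c->addr, (ut64)c->size, c->prev_inuse ? 1 : 0, (ut64)c->fd, (ut64)c->bk);
	free(c);
}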
2508 
2516 RZ_API MallocState *GH(rz_heap_get_arena)(RzCore *core, GHT m_state) {
2517  GHT m_arena;
2518  if (!GH(rz_heap_resolve_main_arena)(core, &m_arena)) {
2519  return NULL;
2520  }
2521  if (!m_state) {
2522  m_state = m_arena;
2523  }
2524  if (!GH(is_arena)(core, m_arena, m_state)) {
2525  return NULL;
2526  }
2527  MallocState *main_arena = RZ_NEW0(MallocState);
2528  if (!main_arena) {
2529  return NULL;
2530  }
2531  if (!GH(rz_heap_update_main_arena)(core, m_state, main_arena)) {
2532  free(main_arena);
2533  return NULL;
2534  }
2535  return main_arena;
2536 }
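
Usage sketch for the arena getter above; passing 0 selects the main arena, and the returned MallocState must be freed by the caller:

MallocState *arena = GH(rz_heap_get_arena)(core, 0);
if (arena) {
	rz_cons_printf("top=0x%" PFMT64x " system_mem=0x%" PFMT64x "\n",
		(ut64)arena->top, (ut64)arena->system_mem);
	free(arena);
}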
2537 
2544 RZ_API bool GH(rz_heap_write_heap_chunk)(RzCore *core, RzHeapChunkSimple *chunk_simple) {
2545  if (!chunk_simple) {
2546  return false;
2547  }
2548  GH(RzHeapChunk) *heap_chunk = RZ_NEW0(GH(RzHeapChunk));
2549  if (!heap_chunk) {
2550  return false;
2551  }
2552 
2553  heap_chunk->size = chunk_simple->size;
2554  // add flag bits to chunk size
2555  if (chunk_simple->prev_inuse) {
2556  heap_chunk->size |= PREV_INUSE;
2557  }
2558  if (chunk_simple->is_mmapped) {
2559  heap_chunk->size |= IS_MMAPPED;
2560  }
2561  if (chunk_simple->non_main_arena) {
2562  heap_chunk->size |= NON_MAIN_ARENA;
2563  }
2564 
2565  heap_chunk->fd = chunk_simple->fd;
2566  heap_chunk->bk = chunk_simple->bk;
2567  heap_chunk->fd_nextsize = chunk_simple->fd_nextsize;
2568  heap_chunk->bk_nextsize = chunk_simple->bk_nextsize;
2569  bool res = rz_io_write_at(core->io, chunk_simple->addr, (ut8 *)heap_chunk, sizeof(GH(RzHeapChunk)));
2570  free(heap_chunk);
2571  return res;
2572 }
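
The writer above pairs naturally with GH(rz_heap_chunk_wrapper): read a chunk header, tweak a field, write it back. A hedged sketch (addr is assumed to point at a chunk header; note that the write covers the whole header struct):

RzHeapChunkSimple *c = GH(rz_heap_chunk_wrapper)(core, addr);
if (c) {
	c->prev_inuse = true; /* example edit: set the PREV_INUSE flag bit */
	if (!GH(rz_heap_write_heap_chunk)(core, c)) {
		eprintf("failed to write chunk header at 0x%" PFMT64x "\n", (ut64)c->addr);
	}
	free(c);
}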
RZ_API void rz_agraph_free(RzAGraph *g)
Definition: agraph.c:3909
RZ_API void rz_agraph_set_title(RzAGraph *g, const char *title)
Definition: agraph.c:3720
RZ_API RzAGraph * rz_agraph_new(RzConsCanvas *can)
Definition: agraph.c:3922
RZ_API RzANode * rz_agraph_add_node(const RzAGraph *g, const char *title, const char *body)
Definition: agraph.c:3765
RZ_API void rz_agraph_print(RzAGraph *g)
Definition: agraph.c:3687
RZ_API void rz_agraph_add_edge(const RzAGraph *g, RzANode *a, RzANode *b)
Definition: agraph.c:3862
#define RZ_IPI
Definition: analysis_wasm.c:11
lzma_index ** i
Definition: index.h:629
RZ_API bool rz_bin_file_set_cur_binfile(RzBin *bin, RzBinFile *bf)
Definition: bfile.c:288
RZ_API bool rz_bin_file_delete(RzBin *bin, RzBinFile *bf)
Definition: bfile.c:213
RZ_DEPRECATE RZ_API RZ_BORROW RzBinInfo * rz_bin_get_info(RzBin *bin)
Definition: bin.c:585
RZ_DEPRECATE RZ_API RZ_BORROW RzList * rz_bin_get_symbols(RZ_NONNULL RzBin *bin)
Definition: bin.c:696
RZ_API void rz_bin_options_init(RzBinOptions *opt, int fd, ut64 baseaddr, ut64 loadaddr, bool patch_relocs)
Definition: bin.c:75
RZ_API RzBinFile * rz_bin_open(RzBin *bin, const char *file, RzBinOptions *opt)
Definition: bin.c:200
RZ_API RzBinFile * rz_bin_cur(RzBin *bin)
Definition: bin.c:895
static RzList * maps(RzBinFile *bf)
Definition: bin_bf.c:116
RzBinInfo * info(RzBinFile *bf)
Definition: bin_ne.c:86
RZ_API RzConsCanvas * rz_cons_canvas_new(int w, int h)
Definition: canvas.c:223
RZ_API void rz_cons_canvas_free(RzConsCanvas *c)
Definition: canvas.c:150
RZ_API ut64 rz_config_get_i(RzConfig *cfg, RZ_NONNULL const char *name)
Definition: config.c:119
RZ_API bool rz_config_get_b(RzConfig *cfg, RZ_NONNULL const char *name)
Definition: config.c:142
RZ_API RzConfigNode * rz_config_set_i(RzConfig *cfg, RZ_NONNULL const char *name, const ut64 i)
Definition: config.c:419
RZ_API int rz_cons_get_size(int *rows)
Definition: cons.c:1446
RZ_API RzCons * rz_cons_singleton(void)
Definition: cons.c:300
RZ_API void rz_cons_newline(void)
Definition: cons.c:1274
RZ_API int rz_cons_printf(const char *format,...)
Definition: cons.c:1202
RZ_API void rz_cons_println(const char *str)
Definition: cons.c:233
#define RZ_API
RZ_IPI void rz_core_print_hexdump(RZ_NONNULL RzCore *core, ut64 addr, RZ_NONNULL const ut8 *buf, int len, int base, int step, size_t zoomsz)
Definition: cprint.c:161
#define NULL
Definition: cris-opc.c:27
#define r
Definition: crypto_rc6.c:12
#define w
Definition: crypto_rc6.c:13
uint32_t ut32
RZ_API bool rz_debug_map_sync(RzDebug *dbg)
Definition: dmap.c:33
const char * k
Definition: dsignal.c:11
const char * v
Definition: dsignal.c:12
size_t map(int syms, int left, int len)
Definition: enough.c:237
RZ_API void rz_config_hold_restore(RzConfigHold *h)
Restore whatever config options were previously saved in h.
Definition: hold.c:132
RZ_API RzConfigHold * rz_config_hold_new(RzConfig *cfg)
Create an opaque object to save/restore some configuration options.
Definition: hold.c:116
RZ_API void rz_config_hold_free(RzConfigHold *h)
Free a RzConfigHold object h.
Definition: hold.c:152
RZ_API void Ht_() free(HtName_(Ht) *ht)
Definition: ht_inc.c:130
voidpf void uLong size
Definition: ioapi.h:138
voidpf uLong offset
Definition: ioapi.h:144
const char int mode
Definition: ioapi.h:137
snprintf
Definition: kernel.h:364
uint8_t ut8
Definition: lh5801.h:11
RZ_API RZ_OWN RzList * rz_list_newf(RzListFree f)
Returns a new initialized RzList pointer and sets the free method.
Definition: list.c:248
RZ_API ut32 rz_list_length(RZ_NONNULL const RzList *list)
Returns the length of the list.
Definition: list.c:109
RZ_API RZ_BORROW RzListIter * rz_list_append(RZ_NONNULL RzList *list, void *data)
Appends at the end of the list a new element.
Definition: list.c:288
RZ_API void rz_list_free(RZ_NONNULL RzList *list)
Empties the list and frees the list pointer.
Definition: list.c:137
void * calloc(size_t number, size_t size)
Definition: malloc.c:102
static size_t min_size
Definition: malloc.c:68
RZ_IPI RzCmdStatus GH() rz_cmd_arena_print_handler(RzCore *core, int argc, const char **argv)
char *GH() rz_bin_num_to_type(int bin_num)
RZ_IPI RzCmdStatus GH() rz_cmd_heap_chunk_print_handler(RzCore *core, int argc, const char **argv)
static GHT GH() tcache_get_entry(GH(RTcache) *tcache, int index)
static int GH() print_double_linked_list_bin_simple(RzCore *core, GHT bin, MallocState *main_arena, GHT brk_start)
static bool GH() tcache_read(RzCore *core, GHT tcache_start, GH(RTcache) *tcache)
RZ_IPI RzCmdStatus GH() rz_cmd_heap_info_print_handler(RzCore *core, int argc, const char **argv)
static bool GH() is_tcache(RzCore *core)
static void GH() print_unsortedbin_description(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj)
Prints unsorted bin description for an arena (used for dmhd command)
static GHT GH() get_main_arena_with_symbol(RzCore *core, RzDebugMap *map)
RZ_API RzList *GH() rz_heap_arena_list_wrapper(RzCore *core)
A wrapper around GH(rz_heap_arenas_list) which handles finding main_arena.
static int GH() tcache_get_count(GH(RTcache) *tcache, int index)
RZ_API bool GH() rz_heap_resolve_main_arena(RzCore *core, GHT *m_arena)
Store the base address of main arena at m_arena.
void GH() rz_arena_list_free(RzArenaListItem *item)
RZ_API GH(RzHeapChunk)
Get a heap chunk with base address <addr>
RZ_API RzHeapBin *GH() rz_heap_bin_content(RzCore *core, MallocState *main_arena, int bin_num, GHT m_arena)
Get information about <bin_num> bin from NBINS array of an arena.
RZ_API RzHeapChunkSimple *GH() rz_heap_chunk_wrapper(RzCore *core, GHT addr)
Get info about a heap chunk as RzHeapChunkSimple.
static void GH() print_main_arena_bins(RzCore *core, GHT m_arena, MallocState *main_arena, GHT main_arena_base, GHT global_max_fast, RzHeapBinType format, bool json)
Prints description of bins for main arena for dmhd command.
RZ_API void GH() rz_heap_bin_free(RzHeapBin *bin)
void GH() rz_heap_chunk_free(RzHeapChunkListItem *item)
RZ_IPI RzCmdStatus GH() rz_cmd_heap_arena_bins_print_handler(RzCore *core, int argc, const char **argv, RzOutputMode mode)
void GH() print_malloc_states(RzCore *core, GHT m_arena, MallocState *main_arena, bool json)
RZ_API RzList *GH() rz_heap_arenas_list(RzCore *core, GHT m_arena, MallocState *main_arena)
Get a list of MallocState structs for all the arenas.
RZ_API RzHeapBin *GH() rz_heap_fastbin_content(RzCore *core, MallocState *main_arena, int bin_num)
void GH() print_malloc_info(RzCore *core, GHT m_state, GHT malloc_state)
void GH() print_heap_chunk_simple(RzCore *core, GHT chunk, const char *status, PJ *pj)
Prints compact representation of a heap chunk. Format: Chunk(addr=, size=, flags=)
static void GH() print_smallbin_description(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj)
Prints small bins description for an arena (used for dmhd command)
static void GH() get_brks(RzCore *core, GHT *brk_start, GHT *brk_end)
RZ_IPI RzCmdStatus GH() rz_cmd_main_arena_print_handler(RzCore *core, int argc, const char **argv, RzOutputMode mode)
RZ_IPI RzCmdStatus GH() rz_cmd_heap_tcache_print_handler(RzCore *core, int argc, const char **argv)
RZ_IPI RzCmdStatus GH() rz_cmd_heap_chunks_print_handler(RzCore *core, int argc, const char **argv, RzCmdStateOutput *state)
static int GH() print_bin_content(RzCore *core, MallocState *main_arena, int bin_num, PJ *pj, GHT m_arena)
Prints the heap chunks in a bin with double linked list (small|large|unsorted)
static void GH() print_arena_stats(RzCore *core, GHT m_arena, MallocState *main_arena, GHT global_max_fast, int format)
RZ_IPI int GH() rz_cmd_heap_fastbins_print(void *data, const char *input)
void GH() print_heap_fastbin(RzCore *core, GHT m_arena, MallocState *main_arena, GHT global_max_fast, const char *input, bool main_arena_only, PJ *pj)
static int GH() print_double_linked_list_bin(RzCore *core, MallocState *main_arena, GHT m_arena, GHT offset, GHT num_bin, int graph)
RZ_API RzList *GH() rz_heap_chunks_list(RzCore *core, MallocState *main_arena, GHT m_arena, GHT m_state, bool top_chunk)
Get a list of all the heap chunks in an arena. The chunks are in the form of a struct RzHeapChunkListItem...
RZ_IPI int GH() rz_cmd_heap_bins_list_print(RzCore *core, const char *input)
static GHT GH() tcache_chunk_size(RzCore *core, GHT brk_start)
static void GH() print_largebin_description(RzCore *core, GHT m_arena, MallocState *main_arena, PJ *pj)
Prints large bins description for an arena (used for dmhd command)
RZ_API bool GH() rz_heap_write_heap_chunk(RzCore *core, RzHeapChunkSimple *chunk_simple)
Write a heap chunk header to memory.
static GHT GH() get_va_symbol(RzCore *core, const char *path, const char *sym_name)
Find the address of a given symbol.
static GHT GH() get_next_pointer(RzCore *core, GHT pos, GHT next)
RZ_API RzList *GH() rz_heap_tcache_content(RzCore *core, GHT arena_base)
Get a list of bins for the tcache associated with arena with base address arena_base.
void GH() print_inst_minfo(GH(RzHeapInfo) *heap_info, GHT hinfo)
static int GH() print_double_linked_list_bin_graph(RzCore *core, GHT bin, MallocState *main_arena, GHT brk_start)
void GH() print_heap_chunk(RzCore *core, GHT chunk)
RZ_API MallocState *GH() rz_heap_get_arena(RzCore *core, GHT m_state)
Get the MallocState struct for the arena with the given base address; if the base address is 0, return the MallocState...
static GHT GH() align_address_to_size(ut64 addr, ut64 align)
RZ_API bool GH() rz_heap_update_main_arena(RzCore *core, GHT m_arena, MallocState *main_arena)
Store the MallocState struct of an arena with base address m_arena in main_arena.
static void GH() update_arena_with_tc(GH(RzHeap_MallocState_tcache) *cmain_arena, MallocState *main_arena)
RZ_API void GH() tcache_free(GH(RTcache) *tcache)
static void GH() print_tcache_content(RzCore *core, GHT arena_base, GHT main_arena_base, PJ *pj)
RZ_API RzList *GH() rz_heap_chunks_list_wrapper(RzCore *core, ut64 m_state)
A wrapper around GH(rz_heap_chunks_list) which handles finding the main arena.
static void GH() print_heap_bin(RzCore *core, GHT m_arena, MallocState *main_arena, const char *input)
static bool GH() is_arena(RzCore *core, GHT m_arena, GHT m_state)
static void GH() update_arena_without_tc(GH(RzHeap_MallocState) *cmain_arena, MallocState *main_arena)
#define read_le(x)
#define GHT
#define GHT_MAX
int n
Definition: mipsasm.c:19
int idx
Definition: setup.py:197
#define eprintf(x, y...)
Definition: rlcc.c:7
static RzSocket * s
Definition: rtr.c:28
#define rz_return_if_fail(expr)
Definition: rz_assert.h:100
#define rz_return_val_if_fail(expr, val)
Definition: rz_assert.h:108
enum rz_cmd_status_t RzCmdStatus
@ RZ_CMD_STATUS_OK
command handler exited in the right way
Definition: rz_cmd.h:24
@ RZ_CMD_STATUS_ERROR
command handler had issues while running (e.g. allocation error, etc.)
Definition: rz_cmd.h:26
RZ_API bool rz_file_exists(const char *str)
Definition: file.c:192
#define PRINT_RA(msg)
Definition: rz_heap_glibc.h:22
#define SZ
Definition: rz_heap_glibc.h:32
#define PRINTF_RA(fmt,...)
Definition: rz_heap_glibc.h:16
#define PRINT_GA(msg)
Definition: rz_heap_glibc.h:20
#define largebin_index(size)
Definition: rz_heap_glibc.h:72
#define TCACHE_MAX_BINS
Definition: rz_heap_glibc.h:37
#define TCACHE_NEW_VERSION
Definition: rz_heap_glibc.h:39
#define TC_HDR_SZ
Definition: rz_heap_glibc.h:47
#define PRINTF_GA(fmt,...)
Definition: rz_heap_glibc.h:14
#define PRINT_BA(msg)
Definition: rz_heap_glibc.h:21
@ RZ_HEAP_BIN_TCACHE
@ RZ_HEAP_BIN_LARGE
@ RZ_HEAP_BIN_FAST
@ RZ_HEAP_BIN_SMALL
@ RZ_HEAP_BIN_ANY
@ RZ_HEAP_BIN_UNSORTED
#define NON_MAIN_ARENA
Definition: rz_heap_glibc.h:26
#define PRINT_YA(msg)
Definition: rz_heap_glibc.h:19
#define FASTBIN_IDX_TO_SIZE(i)
Definition: rz_heap_glibc.h:33
@ OLD
@ NEW
#define BINMAPSIZE
Definition: rz_heap_glibc.h:35
#define MMAP_OFFSET
Definition: rz_heap_glibc.h:43
#define PRINTF_YA(fmt,...)
Definition: rz_heap_glibc.h:13
#define IS_MMAPPED
Definition: rz_heap_glibc.h:25
#define PREV_INUSE
Definition: rz_heap_glibc.h:24
#define NBINS
Definition: rz_heap_glibc.h:28
#define fastbin_index(size)
Definition: rz_heap_glibc.h:75
#define NFASTBINS
Definition: rz_heap_glibc.h:30
#define PRINTF_BA(fmt,...)
Definition: rz_heap_glibc.h:15
enum rz_heap_bin_type RzHeapBinType
#define NSMALLBINS
Definition: rz_heap_glibc.h:29
RZ_API bool rz_io_read_at(RzIO *io, ut64 addr, ut8 *buf, int len)
Definition: io.c:300
RZ_API RZ_BORROW RzPVector * rz_io_maps(RzIO *io)
Returns the pointer to vector containing maps list.
Definition: io_map.c:435
RZ_API bool rz_io_write_at(RzIO *io, ut64 addr, const ut8 *buf, int len)
Definition: io.c:358
RZ_API int rz_io_nread_at(RzIO *io, ut64 addr, ut8 *buf, int len)
Definition: io.c:338
void(* RzListFree)(void *ptr)
Definition: rz_list.h:11
RZ_API ut64 rz_num_get(RzNum *num, const char *str)
Definition: unum.c:172
RZ_API double rz_num_get_float(RzNum *num, const char *str)
Definition: unum.c:536
RZ_API char * rz_num_units(char *buf, size_t len, ut64 number)
Definition: unum.c:108
RZ_API PJ * pj_ka(PJ *j, const char *k)
Definition: pj.c:163
RZ_API PJ * pj_new(void)
Definition: pj.c:25
RZ_API PJ * pj_end(PJ *j)
Definition: pj.c:87
RZ_API const char * pj_string(PJ *pj)
Definition: pj.c:57
RZ_API void pj_free(PJ *j)
Definition: pj.c:34
RZ_API PJ * pj_o(PJ *j)
Definition: pj.c:75
RZ_API PJ * pj_ks(PJ *j, const char *k, const char *v)
Definition: pj.c:170
RZ_API PJ * pj_kn(PJ *j, const char *k, ut64 n)
Definition: pj.c:121
#define RZ_PRINT_FLAGS_HEADER
Definition: rz_print.h:18
RZ_API char * rz_str_newf(const char *fmt,...) RZ_PRINTF_CHECK(1
RZ_API char * rz_str_new(const char *str)
Definition: str.c:865
RZ_API char RZ_API char * rz_str_newlen(const char *str, int len)
Definition: str.c:871
RZ_API int rz_snprintf(char *string, int len, const char *fmt,...) RZ_PRINTF_CHECK(3
#define PFMT64d
Definition: rz_types.h:394
#define RZ_NEW0(x)
Definition: rz_types.h:284
#define RZ_PERM_RW
Definition: rz_types.h:96
#define PFMT64u
Definition: rz_types.h:395
RzOutputMode
Enum to describe the way data are printed.
Definition: rz_types.h:38
@ RZ_OUTPUT_MODE_LONG
Definition: rz_types.h:44
@ RZ_OUTPUT_MODE_JSON
Definition: rz_types.h:40
@ RZ_OUTPUT_MODE_LONG_JSON
Definition: rz_types.h:45
@ RZ_OUTPUT_MODE_RIZIN
Definition: rz_types.h:41
@ RZ_OUTPUT_MODE_STANDARD
Definition: rz_types.h:39
#define PFMT64x
Definition: rz_types.h:393
#define RZ_EMPTY
Definition: rz_types_base.h:68
#define rz_pvector_foreach(vec, it)
Definition: rz_vector.h:334
#define h(i)
Definition: sha256.c:48
Definition: malloc.c:26
int fd
Definition: gzjoin.c:80
Definition: malloc.c:21
Definition: zipcmp.c:77
Definition: z80asm.h:102
Definition: rz_pj.h:12
MallocState * arena
XX curplugin == o->plugin.
Definition: rz_bin.h:298
char * arch
Definition: rz_bin.h:214
Represent the output state of a command handler.
Definition: rz_cmd.h:91
RzConsPrintablePalette pal
Definition: rz_cons.h:491
RzConsContext * context
Definition: rz_cons.h:502
ut64 offset
Definition: rz_core.h:301
ut64 prompt_offset
Definition: rz_core.h:302
char * message
RzList * chunks
unsigned int attached_threads
Definition: dis.h:43
char * message
Definition: main.c:12
int pos
Definition: main.c:11
#define bool
Definition: sysdefs.h:146
struct bin bins[64]
Definition: malloc.c:34
void error(const char *msg)
Definition: untgz.c:593
ut64(WINAPI *w32_GetEnabledXStateFeatures)()
static int addr
Definition: z80asm.c:58
static bool input(void *ud, zip_uint8_t *data, zip_uint64_t length)