Rizin
unix-like reverse engineering framework and cli tools
block.c
Go to the documentation of this file.
1 // SPDX-FileCopyrightText: 2019-2020 pancake <pancake@nopcode.org>
2 // SPDX-FileCopyrightText: 2019-2020 thestr4ng3r <info@florianmaerkl.de>
3 // SPDX-License-Identifier: LGPL-3.0-only
4 
5 #include <rz_analysis.h>
6 #include <rz_hash.h>
7 #include <ht_uu.h>
8 #include <assert.h>
9 
10 #define unwrap(rbnode) ((rbnode) ? container_of(rbnode, RzAnalysisBlock, _rb) : NULL)
11 
12 static void __max_end(RBNode *node) {
13  RzAnalysisBlock *block = unwrap(node);
14  block->_max_end = block->addr + block->size;
15  int i;
16  for (i = 0; i < 2; i++) {
17  if (node->child[i]) {
18  ut64 end = unwrap(node->child[i])->_max_end;
19  if (end > block->_max_end) {
20  block->_max_end = end;
21  }
22  }
23  }
24 }
25 
26 static int __bb_addr_cmp(const void *incoming, const RBNode *in_tree, void *user) {
27  ut64 incoming_addr = *(ut64 *)incoming;
28  const RzAnalysisBlock *in_tree_block = container_of(in_tree, const RzAnalysisBlock, _rb);
29  if (incoming_addr < in_tree_block->addr) {
30  return -1;
31  }
32  if (incoming_addr > in_tree_block->addr) {
33  return 1;
34  }
35  return 0;
36 }
37 
38 #define D if (analysis && analysis->verbose)
39 
41  assert(bb->ref > 0); // 0-refd must already be freed.
42  bb->ref++;
43 }
44 
45 #define DFLT_NINSTR 3
46 
49  if (!block) {
50  return NULL;
51  }
52  block->addr = addr;
53  block->size = size;
54  block->analysis = a;
55  block->ref = 1;
56  block->jump = UT64_MAX;
57  block->fail = UT64_MAX;
58  block->op_pos = RZ_NEWS0(ut16, DFLT_NINSTR);
59  block->op_pos_size = DFLT_NINSTR;
60  block->stackptr = 0;
61  block->parent_stackptr = INT_MAX;
62  block->cmpval = UT64_MAX;
63  block->fcns = rz_list_new();
64  if (size) {
66  }
67  return block;
68 }
69 
70 static void block_free(RzAnalysisBlock *block) {
71  if (!block) {
72  return;
73  }
75  free(block->fingerprint);
77  free(block->op_bytes);
79  rz_list_free(block->fcns);
80  free(block->op_pos);
81  free(block->parent_reg_arena);
82  free(block);
83 }
84 
85 void __block_free_rb(RBNode *node, void *user) {
86  RzAnalysisBlock *block = unwrap(node);
87  block_free(block);
88 }
89 
91  RBNode *node = rz_rbtree_find(analysis->bb_tree, &addr, __bb_addr_cmp, NULL);
92  return unwrap(node);
93 }
94 
95 // This is a special case of what rz_interval_node_all_in() does
96 static bool all_in(RzAnalysisBlock *node, ut64 addr, RzAnalysisBlockCb cb, void *user) {
97  while (node && addr < node->addr) {
98  // less than the current node, but might still be contained further down
99  node = unwrap(node->_rb.child[0]);
100  }
101  if (!node) {
102  return true;
103  }
104  if (addr >= node->_max_end) {
105  return true;
106  }
107  if (addr < node->addr + node->size) {
108  if (!cb(node, user)) {
109  return false;
110  }
111  }
112  // This can be done more efficiently by building the stack manually
113  if (!all_in(unwrap(node->_rb.child[0]), addr, cb, user)) {
114  return false;
115  }
116  if (!all_in(unwrap(node->_rb.child[1]), addr, cb, user)) {
117  return false;
118  }
119  return true;
120 }
121 
122 RZ_API bool rz_analysis_blocks_foreach_in(RzAnalysis *analysis, ut64 addr, RzAnalysisBlockCb cb, void *user) {
123  return all_in(unwrap(analysis->bb_tree), addr, cb, user);
124 }
125 
126 static bool block_list_cb(RzAnalysisBlock *block, void *user) {
127  RzList *list = user;
128  rz_analysis_block_ref(block);
129  rz_list_push(list, block);
130  return true;
131 }
132 
133 RZ_API RzList /*<RzAnalysisBlock *>*/ *rz_analysis_get_blocks_in(RzAnalysis *analysis, ut64 addr) {
135  if (list) {
137  }
138  return list;
139 }
140 
141 static void all_intersect(RzAnalysisBlock *node, ut64 addr, ut64 size, RzAnalysisBlockCb cb, void *user) {
142  ut64 end = addr + size;
143  while (node && end <= node->addr) {
144  // less than the current node, but might still be contained further down
145  node = unwrap(node->_rb.child[0]);
146  }
147  if (!node) {
148  return;
149  }
150  if (addr >= node->_max_end) {
151  return;
152  }
153  if (addr < node->addr + node->size) {
154  cb(node, user);
155  }
156  // This can be done more efficiently by building the stack manually
157  all_intersect(unwrap(node->_rb.child[0]), addr, size, cb, user);
158  all_intersect(unwrap(node->_rb.child[1]), addr, size, cb, user);
159 }
160 
161 RZ_API void rz_analysis_blocks_foreach_intersect(RzAnalysis *analysis, ut64 addr, ut64 size, RzAnalysisBlockCb cb, void *user) {
162  all_intersect(unwrap(analysis->bb_tree), addr, size, cb, user);
163 }
164 
167  if (!list) {
168  return NULL;
169  }
171  return list;
172 }
173 
175  if (rz_analysis_get_block_at(analysis, addr)) {
176  return NULL;
177  }
178  RzAnalysisBlock *block = block_new(analysis, addr, size);
179  if (!block) {
180  return NULL;
181  }
182  rz_rbtree_aug_insert(&analysis->bb_tree, &block->addr, &block->_rb, __bb_addr_cmp, NULL, __max_end);
183  return block;
184 }
185 
188  while (!rz_list_empty(bb->fcns)) {
190  }
192 }
193 
195  if (block->size == size) {
196  return;
197  }
198 
199  // Update the block's function's cached ranges
200  RzAnalysisFunction *fcn;
201  RzListIter *iter;
202  rz_list_foreach (block->fcns, iter, fcn) {
203  if (fcn->meta._min != UT64_MAX && fcn->meta._max == block->addr + block->size) {
204  fcn->meta._max = block->addr + size;
205  }
206  }
207 
208  // Do the actual resize
209  block->size = size;
211 }
212 
214  if (block->addr == addr) {
217  return true;
218  }
219  if (rz_analysis_get_block_at(block->analysis, addr)) {
220  // Two blocks at the same addr is illegle you know...
221  return false;
222  }
223 
224  // Update the block's function's cached ranges
225  RzAnalysisFunction *fcn;
226  RzListIter *iter;
227  rz_list_foreach (block->fcns, iter, fcn) {
228  if (fcn->meta._min != UT64_MAX) {
229  if (addr + size > fcn->meta._max) {
230  // we extend after the maximum, so we are the maximum afterwards.
231  fcn->meta._max = addr + size;
232  } else if (block->addr + block->size == fcn->meta._max && addr + size != block->addr + block->size) {
233  // we were the maximum before and may not be it afterwards, not trivial to recalculate.
234  fcn->meta._min = UT64_MAX;
235  continue;
236  }
237  if (block->addr < fcn->meta._min) {
238  // less than the minimum, we know that we are the minimum afterwards.
239  fcn->meta._min = addr;
240  } else if (block->addr == fcn->meta._min && addr != block->addr) {
241  // we were the minimum before and may not be it afterwards, not trivial to recalculate.
242  fcn->meta._min = UT64_MAX;
243  }
244  }
245  }
246 
248  block->addr = addr;
249  block->size = size;
251  rz_rbtree_aug_insert(&block->analysis->bb_tree, &block->addr, &block->_rb, __bb_addr_cmp, NULL, __max_end);
252  return true;
253 }
254 
256  RzAnalysis *analysis = bbi->analysis;
257  rz_return_val_if_fail(bbi && addr >= bbi->addr && addr < bbi->addr + bbi->size && addr != UT64_MAX, 0);
258  if (addr == bbi->addr) {
259  rz_analysis_block_ref(bbi); // ref to be consistent with splitted return refcount
260  return bbi;
261  }
262 
264  // can't have two bbs at the same addr
265  return NULL;
266  }
267 
268  // create the second block
269  RzAnalysisBlock *bb = block_new(analysis, addr, bbi->addr + bbi->size - addr);
270  if (!bb) {
271  return NULL;
272  }
273  bb->jump = bbi->jump;
274  bb->fail = bbi->fail;
275  bb->parent_stackptr = bbi->stackptr;
276  bb->switch_op = bbi->switch_op;
277 
278  // resize the first block
279  rz_analysis_block_set_size(bbi, addr - bbi->addr);
280  bbi->jump = addr;
281  bbi->fail = UT64_MAX;
282  bbi->switch_op = NULL;
284 
285  // insert the second block into the tree
286  rz_rbtree_aug_insert(&analysis->bb_tree, &bb->addr, &bb->_rb, __bb_addr_cmp, NULL, __max_end);
287 
288  // insert the second block into all functions of the first
289  RzListIter *iter;
290  RzAnalysisFunction *fcn;
291  rz_list_foreach (bbi->fcns, iter, fcn) {
293  }
294 
295  // recalculate offset of instructions in both bb and bbi
296  int i;
297  i = 0;
298  while (i < bbi->ninstr && rz_analysis_block_get_op_offset(bbi, i) < bbi->size) {
299  i++;
300  }
301  int new_bbi_instr = i;
302  if (bb->addr - bbi->addr == rz_analysis_block_get_op_offset(bbi, i)) {
303  bb->ninstr = 0;
304  while (i < bbi->ninstr) {
305  ut16 off_op = rz_analysis_block_get_op_offset(bbi, i);
306  if (off_op >= bbi->size + bb->size) {
307  break;
308  }
309  rz_analysis_block_set_op_offset(bb, bb->ninstr, off_op - bbi->size);
310  bb->ninstr++;
311  i++;
312  }
313  }
314  bbi->ninstr = new_bbi_instr;
315  return bb;
316 }
317 
319  if (!rz_analysis_block_is_contiguous(a, b)) {
320  return false;
321  }
322 
323  // check if function lists are identical
324  if (rz_list_length(a->fcns) != rz_list_length(b->fcns)) {
325  return false;
326  }
327  RzAnalysisFunction *fcn;
328  RzListIter *iter;
329  rz_list_foreach (a->fcns, iter, fcn) {
330  if (!rz_list_contains(b->fcns, fcn)) {
331  return false;
332  }
333  }
334 
335  // Keep a ref to b, but remove all references of b from its functions
337  while (!rz_list_empty(b->fcns)) {
339  }
340 
341  // merge ops from b into a
342  size_t i;
343  for (i = 0; i < b->ninstr; i++) {
345  }
346 
347  // merge everything else into a
348  a->size += b->size;
349  a->jump = b->jump;
350  a->fail = b->fail;
351  if (a->switch_op) {
352  RZ_LOG_DEBUG("Dropping switch table at 0x%" PFMT64x " of block at 0x%" PFMT64x "\n", a->switch_op->addr, a->addr);
353  rz_analysis_switch_op_free(a->switch_op);
354  }
355  a->switch_op = b->switch_op;
356  b->switch_op = NULL;
358 
359  // kill b completely
360  rz_rbtree_aug_delete(&a->analysis->bb_tree, &b->addr, __bb_addr_cmp, NULL, __block_free_rb, NULL, __max_end);
361 
362  // invalidate ranges of a's functions
363  rz_list_foreach (a->fcns, iter, fcn) {
364  fcn->meta._min = UT64_MAX;
365  }
366 
367  return true;
368 }
369 
371  if (!bb) {
372  return;
373  }
374  assert(bb->ref > 0);
375  bb->ref--;
376  assert(bb->ref >= rz_list_length(bb->fcns)); // all of the block's functions must hold a reference to it
377  if (bb->ref < 1) {
378  RzAnalysis *analysis = bb->analysis;
379  assert(!bb->fcns || rz_list_empty(bb->fcns));
381  }
382 }
383 
384 RZ_API bool rz_analysis_block_successor_addrs_foreach(RzAnalysisBlock *block, RzAnalysisAddrCb cb, void *user) {
385 #define CB_ADDR(addr) \
386  do { \
387  if (addr == UT64_MAX) { \
388  break; \
389  } \
390  if (!cb(addr, user)) { \
391  return false; \
392  } \
393  } while (0);
394 
395  CB_ADDR(block->jump);
396  CB_ADDR(block->fail);
397  if (block->switch_op && block->switch_op->cases) {
398  RzListIter *iter;
399  RzAnalysisCaseOp *caseop;
400  rz_list_foreach (block->switch_op->cases, iter, caseop) {
401  CB_ADDR(caseop->jump);
402  }
403  }
404 
405  return true;
406 #undef CB_ADDR
407 }
408 
411  RzPVector /*<RzAnalysisBlock>*/ to_visit;
412  HtUP *visited;
414 
415 static bool block_recurse_successor_cb(ut64 addr, void *user) {
417  if (ht_up_find_kv(ctx->visited, addr, NULL)) {
418  // already visited
419  return true;
420  }
421  ht_up_insert(ctx->visited, addr, NULL);
422  RzAnalysisBlock *block = rz_analysis_get_block_at(ctx->analysis, addr);
423  if (!block) {
424  return true;
425  }
426  rz_pvector_push(&ctx->to_visit, block);
427  return true;
428 }
429 
430 RZ_API bool rz_analysis_block_recurse(RzAnalysisBlock *block, RzAnalysisBlockCb cb, void *user) {
431  bool breaked = false;
433  ctx.analysis = block->analysis;
434  rz_pvector_init(&ctx.to_visit, NULL);
435  ctx.visited = ht_up_new0();
436  if (!ctx.visited) {
437  goto beach;
438  }
439 
440  ht_up_insert(ctx.visited, block->addr, NULL);
441  rz_pvector_push(&ctx.to_visit, block);
442 
443  while (!rz_pvector_empty(&ctx.to_visit)) {
444  RzAnalysisBlock *cur = rz_pvector_pop(&ctx.to_visit);
445  breaked = !cb(cur, user);
446  if (breaked) {
447  break;
448  }
450  }
451 
452 beach:
453  ht_up_free(ctx.visited);
454  rz_pvector_clear(&ctx.to_visit);
455  return !breaked;
456 }
457 
458 RZ_API bool rz_analysis_block_recurse_followthrough(RzAnalysisBlock *block, RzAnalysisBlockCb cb, void *user) {
459  bool breaked = false;
461  ctx.analysis = block->analysis;
462  rz_pvector_init(&ctx.to_visit, NULL);
463  ctx.visited = ht_up_new0();
464  if (!ctx.visited) {
465  goto beach;
466  }
467 
468  ht_up_insert(ctx.visited, block->addr, NULL);
469  rz_pvector_push(&ctx.to_visit, block);
470 
471  while (!rz_pvector_empty(&ctx.to_visit)) {
472  RzAnalysisBlock *cur = rz_pvector_pop(&ctx.to_visit);
473  bool b = !cb(cur, user);
474  if (b) {
475  breaked = true;
476  } else {
478  }
479  }
480 
481 beach:
482  ht_up_free(ctx.visited);
483  rz_pvector_clear(&ctx.to_visit);
484  return !breaked;
485 }
486 
487 typedef struct {
491 
492 RZ_API bool rz_analysis_block_recurse_depth_first(RzAnalysisBlock *block, RzAnalysisBlockCb cb, RZ_NULLABLE RzAnalysisBlockCb on_exit, void *user) {
493  rz_return_val_if_fail(block && cb, true);
494  bool breaked = false;
495  HtUP *visited = ht_up_new0();
496  if (!visited) {
497  goto beach;
498  }
499  RzAnalysis *analysis = block->analysis;
500  RzVector path;
502  RzAnalysisBlock *cur_bb = block;
503  RecurseDepthFirstCtx ctx = { cur_bb, NULL };
504  rz_vector_push(&path, &ctx);
505  ht_up_insert(visited, cur_bb->addr, NULL);
506  breaked = !cb(cur_bb, user);
507  if (breaked) {
508  goto beach;
509  }
510  do {
511  RecurseDepthFirstCtx *cur_ctx = rz_vector_index_ptr(&path, path.len - 1);
512  cur_bb = cur_ctx->bb;
513  if (cur_bb->jump != UT64_MAX && !ht_up_find_kv(visited, cur_bb->jump, NULL)) {
514  cur_bb = rz_analysis_get_block_at(analysis, cur_bb->jump);
515  } else if (cur_bb->fail != UT64_MAX && !ht_up_find_kv(visited, cur_bb->fail, NULL)) {
516  cur_bb = rz_analysis_get_block_at(analysis, cur_bb->fail);
517  } else {
518  if (cur_bb->switch_op && !cur_ctx->switch_it) {
519  cur_ctx->switch_it = rz_list_head(cur_bb->switch_op->cases);
520  } else if (cur_ctx->switch_it) {
521  cur_ctx->switch_it = rz_list_iter_get_next(cur_ctx->switch_it);
522  }
523  if (cur_ctx->switch_it) {
525  while (ht_up_find_kv(visited, cop->jump, NULL)) {
526  cur_ctx->switch_it = rz_list_iter_get_next(cur_ctx->switch_it);
527  if (!cur_ctx->switch_it) {
528  cop = NULL;
529  break;
530  }
531  cop = rz_list_iter_get_data(cur_ctx->switch_it);
532  }
533  cur_bb = cop ? rz_analysis_get_block_at(analysis, cop->jump) : NULL;
534  } else {
535  cur_bb = NULL;
536  }
537  }
538  if (cur_bb) {
539  RecurseDepthFirstCtx ctx = { cur_bb, NULL };
540  rz_vector_push(&path, &ctx);
541  ht_up_insert(visited, cur_bb->addr, NULL);
542  bool breaked = !cb(cur_bb, user);
543  if (breaked) {
544  break;
545  }
546  } else {
547  if (on_exit) {
548  on_exit(cur_ctx->bb, user);
549  }
551  }
552  } while (!rz_vector_empty(&path));
553 
554 beach:
555  ht_up_free(visited);
557  return !breaked;
558 }
559 
560 static bool recurse_list_cb(RzAnalysisBlock *block, void *user) {
561  RzList *list = user;
562  rz_analysis_block_ref(block);
563  rz_list_push(list, block);
564  return true;
565 }
566 
569  if (ret) {
571  }
572  return ret;
573 }
574 
575 RZ_API void rz_analysis_block_add_switch_case(RzAnalysisBlock *block, ut64 switch_addr, ut64 case_value, ut64 case_addr) {
576  if (!block->switch_op) {
577  block->switch_op = rz_analysis_switch_op_new(switch_addr, 0, 0, 0);
578  }
579  rz_analysis_switch_op_add_case(block->switch_op, case_addr, case_value, case_addr);
580 }
581 
583  if (!rz_analysis_block_contains(bb, addr)) {
584  return false;
585  }
586  ut64 off = addr - bb->addr;
587  if (off > UT16_MAX) {
588  return false;
589  }
590  size_t i;
591  for (i = 0; i < bb->ninstr; i++) {
592  ut16 inst_off = rz_analysis_block_get_op_offset(bb, i);
593  if (off == inst_off) {
594  return true;
595  }
596  }
597  return false;
598 }
599 
600 typedef struct {
604  RzPVector /*<RzAnalysisBlock>*/ *next_visit; // accumulate block of the next level in the tree
605  HtUP /*<RzAnalysisBlock>*/ *visited; // maps addrs to their previous block (or NULL for entry)
606 } PathContext;
607 
608 static bool shortest_path_successor_cb(ut64 addr, void *user) {
609  PathContext *ctx = user;
610  if (ht_up_find_kv(ctx->visited, addr, NULL)) {
611  // already visited
612  return true;
613  }
614  ht_up_insert(ctx->visited, addr, ctx->cur_parent);
615  RzAnalysisBlock *block = rz_analysis_get_block_at(ctx->analysis, addr);
616  if (block) {
617  rz_pvector_push(ctx->next_visit, block);
618  }
619  return addr != ctx->dst; // break if we found our destination
620 }
621 
623  RzList *ret = NULL;
625  ctx.analysis = block->analysis;
626  ctx.dst = dst;
627 
628  // two vectors to swap cur_visit/next_visit
629  RzPVector visit_a;
630  rz_pvector_init(&visit_a, NULL);
631  RzPVector visit_b;
632  rz_pvector_init(&visit_b, NULL);
633  ctx.next_visit = &visit_a;
634  RzPVector *cur_visit = &visit_b; // cur visit is the current level in the tree
635 
636  ctx.visited = ht_up_new0();
637  if (!ctx.visited) {
638  goto beach;
639  }
640 
641  ht_up_insert(ctx.visited, block->addr, NULL);
642  rz_pvector_push(cur_visit, block);
643 
644  // BFS
645  while (!rz_pvector_empty(cur_visit)) {
646  void **it;
647  rz_pvector_foreach (cur_visit, it) {
648  RzAnalysisBlock *cur = *it;
649  ctx.cur_parent = cur;
651  }
652  RzPVector *tmp = cur_visit;
653  cur_visit = ctx.next_visit;
654  ctx.next_visit = tmp;
655  rz_pvector_clear(ctx.next_visit);
656  }
657 
658  // reconstruct the path
659  bool found = false;
660  RzAnalysisBlock *prev = ht_up_find(ctx.visited, dst, &found);
662  if (found && dst_block) {
664  rz_analysis_block_ref(dst_block);
665  rz_list_prepend(ret, dst_block);
666  while (prev) {
667  rz_analysis_block_ref(prev);
668  rz_list_prepend(ret, prev);
669  prev = ht_up_find(ctx.visited, prev->addr, NULL);
670  }
671  }
672 
673 beach:
674  ht_up_free(ctx.visited);
675  rz_pvector_clear(&visit_a);
676  rz_pvector_clear(&visit_b);
677  return ret;
678 }
679 
681  rz_return_val_if_fail(block, false);
682  if (!block->analysis->iob.read_at) {
683  return false;
684  }
685  ut8 *buf = malloc(block->size);
686  if (!buf) {
687  return false;
688  }
689  if (!block->analysis->iob.read_at(block->analysis->iob.io, block->addr, buf, block->size)) {
690  free(buf);
691  return false;
692  }
693  ut32 cur_hash = rz_hash_xxhash(block->analysis->hash, buf, block->size);
694  free(buf);
695  return block->bbhash != cur_hash;
696 }
697 
699  rz_return_if_fail(block);
700  if (!block->analysis->iob.read_at) {
701  return;
702  }
703  ut8 *buf = malloc(block->size);
704  if (!buf) {
705  return;
706  }
707  if (!block->analysis->iob.read_at(block->analysis->iob.io, block->addr, buf, block->size)) {
708  free(buf);
709  return;
710  }
711  block->bbhash = rz_hash_xxhash(block->analysis->hash, buf, block->size);
712  free(buf);
713 }
714 
715 typedef struct {
717  bool reachable;
719 
720 static void noreturn_successor_free(HtUPKv *kv) {
721  NoreturnSuccessor *succ = kv->value;
723  free(succ);
724 }
725 
726 static bool noreturn_successors_cb(RzAnalysisBlock *block, void *user) {
727  HtUP *succs = user;
729  if (!succ) {
730  return false;
731  }
732  rz_analysis_block_ref(block);
733  succ->block = block;
734  succ->reachable = false; // reset for first iteration
735  ht_up_insert(succs, block->addr, succ);
736  return true;
737 }
738 
739 static bool noreturn_successors_reachable_cb(RzAnalysisBlock *block, void *user) {
740  HtUP *succs = user;
741  NoreturnSuccessor *succ = ht_up_find(succs, block->addr, NULL);
742  if (succ) {
743  succ->reachable = true;
744  }
745  return true;
746 }
747 
748 static bool noreturn_remove_unreachable_cb(void *user, const ut64 k, const void *v) {
749  RzAnalysisFunction *fcn = user;
751  if (!succ->reachable && rz_list_contains(succ->block->fcns, fcn)) {
753  }
754  succ->reachable = false; // reset for next iteration
755  return true;
756 }
757 
758 static bool noreturn_get_blocks_cb(void *user, const ut64 k, const void *v) {
759  RzList *blocks = user;
762  rz_list_push(blocks, succ->block);
763  return true;
764 }
765 
767  rz_return_val_if_fail(block, NULL);
768  if (!rz_analysis_block_contains(block, addr) || addr == block->addr) {
769  return block;
770  }
771  rz_analysis_block_ref(block);
772 
773  // Cache all recursive successors of block here.
774  // These are the candidates that we might have to remove from functions later.
775  HtUP *succs = ht_up_new(NULL, noreturn_successor_free, NULL); // maps block addr (ut64) => NoreturnSuccessor *
776  if (!succs) {
777  return block;
778  }
780 
781  // Chop the block. Resize and remove all destination addrs
782  rz_analysis_block_set_size(block, addr - block->addr);
784  block->jump = UT64_MAX;
785  block->fail = UT64_MAX;
787  block->switch_op = NULL;
788 
789  // Now, for each fcn, check which of our successors are still reachable in the function remove and the ones that are not.
790  RzListIter *it;
791  RzAnalysisFunction *fcn;
792  // We need to clone the list because block->fcns will get modified in the loop
793  RzList *fcns_cpy = rz_list_clone(block->fcns);
794  rz_list_foreach (fcns_cpy, it, fcn) {
796  if (entry && rz_list_contains(entry->fcns, fcn)) {
798  }
799  ht_up_foreach(succs, noreturn_remove_unreachable_cb, fcn);
800  }
801  rz_list_free(fcns_cpy);
802 
803  // This last step isn't really critical, but nice to have.
804  // Prepare to merge blocks with their predecessors if possible
805  RzList merge_blocks;
806  rz_list_init(&merge_blocks);
807  merge_blocks.free = (RzListFree)rz_analysis_block_unref;
808  ht_up_foreach(succs, noreturn_get_blocks_cb, &merge_blocks);
809 
810  // Free/unref BEFORE doing the merge!
811  // Some of the blocks might not be valid anymore later!
813  ht_up_free(succs);
814 
815  ut64 block_addr = block->addr; // save the addr to identify the block. the automerge might free it so we must not use the pointer!
816 
817  // Do the actual merge
818  rz_analysis_block_automerge(&merge_blocks);
819 
820  // No try to recover the pointer to the block if it still exists
821  RzAnalysisBlock *ret = NULL;
822  for (it = merge_blocks.head; it && (block = it->data, 1); it = it->n) {
823  if (block->addr == block_addr) {
824  // block is still there
825  ret = block;
826  break;
827  }
828  }
829 
830  rz_list_purge(&merge_blocks);
831  return ret;
832 }
833 
834 typedef struct {
835  HtUP *predecessors; // maps a block to its predecessor if it has exactly one, or NULL if there are multiple or the predecessor has multiple successors
836  HtUP *visited_blocks; // during predecessor search, mark blocks whose successors we already checked. Value is void *-casted count of successors
837  HtUP *blocks; // adresses of the blocks we might want to merge with their predecessors => RzAnalysisBlock *
838 
841 } AutomergeCtx;
842 
843 static bool count_successors_cb(ut64 addr, void *user) {
844  AutomergeCtx *ctx = user;
845  ctx->cur_succ_count++;
846  return true;
847 }
848 
849 static bool automerge_predecessor_successor_cb(ut64 addr, void *user) {
850  AutomergeCtx *ctx = user;
851  ctx->cur_succ_count++;
852  RzAnalysisBlock *block = ht_up_find(ctx->blocks, addr, NULL);
853  if (!block) {
854  // we shouldn't merge this one so GL_DONT_CARE
855  return true;
856  }
857  bool found;
858  RzAnalysisBlock *pred = ht_up_find(ctx->predecessors, (ut64)(size_t)block, &found);
859  if (found) {
860  if (pred) {
861  // only one predecessor found so far, but we are the second so there are multiple now
862  ht_up_update(ctx->predecessors, (ut64)(size_t)block, NULL);
863  } // else: already found multiple predecessors, nothing to do
864  } else {
865  // no predecessor found yet, this is the only one until now
866  ht_up_insert(ctx->predecessors, (ut64)(size_t)block, ctx->cur_pred);
867  }
868  return true;
869 }
870 
871 static bool automerge_get_predecessors_cb(void *user, const ut64 k, const void *v) {
872  AutomergeCtx *ctx = user;
873  const RzAnalysisFunction *fcn = (const RzAnalysisFunction *)(size_t)k;
874  RzListIter *it;
875  RzAnalysisBlock *block;
876  rz_list_foreach (fcn->bbs, it, block) {
877  bool already_visited;
878  ht_up_find(ctx->visited_blocks, (ut64)(size_t)block, &already_visited);
879  if (already_visited) {
880  continue;
881  }
882  ctx->cur_pred = block;
883  ctx->cur_succ_count = 0;
885  ht_up_insert(ctx->visited_blocks, (ut64)(size_t)block, (void *)ctx->cur_succ_count);
886  }
887  return true;
888 }
889 
890 // Try to find the contiguous predecessors of all given blocks and merge them if possible,
891 // i.e. if there are no other blocks that have this block as one of their successors
892 RZ_API void rz_analysis_block_automerge(RzList /*<RzAnalysisBlock *>*/ *blocks) {
894  AutomergeCtx ctx = {
895  .predecessors = ht_up_new0(),
896  .visited_blocks = ht_up_new0(),
897  .blocks = ht_up_new0()
898  };
899 
900  HtUP *relevant_fcns = ht_up_new0(); // all the functions that contain some of our blocks (ht abused as a set)
901  RzList *fixup_candidates = rz_list_new(); // used further down
902  if (!ctx.predecessors || !ctx.visited_blocks || !ctx.blocks || !relevant_fcns || !fixup_candidates) {
903  goto beach;
904  }
905 
906  // Get all the functions and prepare ctx.blocks
907  RzListIter *it;
908  RzAnalysisBlock *block;
909  rz_list_foreach (blocks, it, block) {
910  RzListIter *fit;
911  RzAnalysisFunction *fcn;
912  rz_list_foreach (block->fcns, fit, fcn) {
913  ht_up_insert(relevant_fcns, (ut64)(size_t)fcn, NULL);
914  }
915  ht_up_insert(ctx.blocks, block->addr, block);
916  }
917 
918  // Get the single predecessors we might want to merge with
919  ht_up_foreach(relevant_fcns, automerge_get_predecessors_cb, &ctx);
920 
921  // Now finally do the merging
922  RzListIter *tmp;
923  rz_list_foreach_safe (blocks, it, tmp, block) {
924  RzAnalysisBlock *predecessor = ht_up_find(ctx.predecessors, (ut64)(size_t)block, NULL);
925  if (!predecessor) {
926  continue;
927  }
928  size_t pred_succs_count = (size_t)ht_up_find(ctx.visited_blocks, (ut64)(size_t)predecessor, NULL);
929  if (pred_succs_count != 1) {
930  // we can only merge this predecessor if it has exactly one successor
931  continue;
932  }
933 
934  // We are about to merge block into predecessor
935  // However if there are other blocks that have block as the predecessor,
936  // we would uaf after the merge since block will be freed.
937  RzListIter *bit;
938  RzAnalysisBlock *clock;
939  for (bit = it->n; bit && (clock = bit->data, 1); bit = bit->n) {
940  RzAnalysisBlock *fixup_pred = ht_up_find(ctx.predecessors, (ut64)(size_t)clock, NULL);
941  if (fixup_pred == block) {
942  rz_list_push(fixup_candidates, clock);
943  }
944  }
945 
946  if (rz_analysis_block_merge(predecessor, block)) { // rz_analysis_block_merge() does checks like contiguous, to that's fine
947  // block was merged into predecessor, it is now freed!
948  // Update number of successors of the predecessor
949  ctx.cur_succ_count = 0;
951  ht_up_update(ctx.visited_blocks, (ut64)(size_t)predecessor, (void *)(size_t)ctx.cur_succ_count);
952  rz_list_foreach (fixup_candidates, bit, clock) {
953  // Make sure all previous pointers to block now go to predecessor
954  ht_up_update(ctx.predecessors, (ut64)(size_t)clock, predecessor);
955  }
956  // Remove it from the list
958  free(it);
959  }
960 
961  rz_list_purge(fixup_candidates);
962  }
963 
964 beach:
965  ht_up_free(ctx.predecessors);
966  ht_up_free(ctx.visited_blocks);
967  ht_up_free(ctx.blocks);
968  ht_up_free(relevant_fcns);
969  rz_list_free(fixup_candidates);
970 }
971 
972 typedef struct {
976 
// Foreach callback used by the block-from-offset lookup below: remembers the
// best candidate block for a search address in a context struct.
// NOTE(review): the extraction dropped original line 978 here (presumably
// the `ctx = user` declaration of the context struct, whose typedef at
// lines 972-976 is also mostly missing) — body incomplete at this point.
977 static bool block_from_offset_cb(RzAnalysisBlock *block, void *user) {
979  // If an instruction starts exactly at the search addr, return that block immediately
980  if (rz_analysis_block_op_starts_at(block, ctx->addr)) {
981  ctx->ret = block;
982  return false;
983  }
984  // else search the closest one
985  if (!ctx->ret || ctx->ret->addr < block->addr) {
986  ctx->ret = block;
987  }
988  return true;
989 }
990 
1000  return ctx.ret;
1001 }
1002 
1007  if (i >= block->ninstr) {
1008  return UT16_MAX;
1009  }
1010  return (i > 0 && (i - 1) < block->op_pos_size) ? block->op_pos[i - 1] : 0;
1011 }
1012 
1018  if (offset == UT16_MAX) {
1019  return UT64_MAX;
1020  }
1021  return block->addr + offset;
1022 }
1023 
1028  // the offset 0 of the instruction 0 is not stored because always 0
1029  if (i > 0 && v > 0) {
1030  if (i >= block->op_pos_size) {
1031  size_t new_pos_size = i * 2;
1032  ut16 *tmp_op_pos = realloc(block->op_pos, new_pos_size * sizeof(*block->op_pos));
1033  if (!tmp_op_pos) {
1034  return false;
1035  }
1036  block->op_pos_size = new_pos_size;
1037  block->op_pos = tmp_op_pos;
1038  }
1039  block->op_pos[i - 1] = v;
1040  return true;
1041  }
1042  return true;
1043 }
1044 
// Walks the block's recorded instruction offsets and returns the address of
// the instruction containing the given offset `off` (bb->addr + last
// boundary not past off), or UT64_MAX when off is outside the block.
// NOTE(review): the extraction dropped the signature (original line 1048,
// likely `RZ_API ut64 rz_analysis_block_get_op_addr_in(...)` — confirm) and
// original line 1058 inside the loop (presumably the assignment
// `delta = rz_analysis_block_get_op_offset(bb, i);` — without it `delta`
// would be read uninitialized) — body incomplete here.
1049  ut16 delta, delta_off, last_delta;
1050  int i;
1051 
1052  if (!rz_analysis_block_contains(bb, off)) {
1053  return UT64_MAX;
1054  }
1055  last_delta = 0;
1056  delta_off = off - bb->addr;
1057  for (i = 0; i < bb->ninstr; i++) {
1059  if (delta > delta_off) {
1060  return bb->addr + last_delta;
1061  }
1062  last_delta = delta;
1063  }
1064  return bb->addr + last_delta;
1065 }
1066 
1071  if (i >= bb->ninstr) {
1072  return UT64_MAX;
1073  }
1074  ut16 idx_cur = rz_analysis_block_get_op_offset(bb, i);
1075  ut16 idx_next = rz_analysis_block_get_op_offset(bb, i + 1);
1076  return idx_next != UT16_MAX ? idx_next - idx_cur : bb->size - idx_cur;
1077 }
1078 
// Disassembles the block's bytes front to back with rz_analysis_op and
// records per-instruction offsets / the instruction count (block->ninstr).
// NOTE(review): the extraction dropped the signature (original lines
// 1079-1084; presumably `RZ_API void rz_analysis_block_analyze_ops(...)` —
// confirm) and several statements inside the loop (original lines 1107,
// 1113, 1116, 1120 — likely the rz_analysis_op_fini calls and the
// rz_analysis_block_set_op_offset call) — body incomplete here.
1085  rz_return_if_fail(block);
1086  RzAnalysis *a = block->analysis;
1087  if (!a->iob.read_at) {
1088  return;
1089  }
// guard against size == 0 or addr+size overflow wrapping around
1090  if (block->addr + block->size <= block->addr) {
1091  return;
1092  }
1093  ut8 *buf = malloc(block->size);
1094  if (!buf) {
1095  return;
1096  }
1097  if (!a->iob.read_at(a->iob.io, block->addr, buf, block->size)) {
1098  free(buf);
1099  return;
1100  }
1101  ut64 addr = block->addr;
1102  size_t i = 0;
1103  while (addr < block->addr + block->size) {
1104  RzAnalysisOp op;
1105  if (rz_analysis_op(block->analysis, &op, addr,
1106  buf + (addr - block->addr), block->addr + block->size - addr, 0) <= 0) {
1108  break;
1109  }
1110  if (i > 0) {
1111  ut64 off = addr - block->addr;
// offsets are stored as ut16, so instructions past 64 KiB cannot be recorded
1112  if (off >= UT16_MAX) {
1114  break;
1115  }
1117  }
1118  i++;
// advance by the decoded size; force progress on zero/negative sizes
1119  addr += op.size > 0 ? op.size : 1;
1121  }
1122  block->ninstr = i;
1123  free(buf);
1124 }
ut8 op
Definition: 6502dis.c:13
RZ_API void rz_analysis_diff_free(RzAnalysisDiff *diff)
Definition: diff.c:22
RZ_API void rz_analysis_function_add_block(RzAnalysisFunction *fcn, RzAnalysisBlock *bb)
Definition: function.c:264
RZ_API void rz_analysis_function_remove_block(RzAnalysisFunction *fcn, RzAnalysisBlock *bb)
Definition: function.c:286
lzma_index ** i
Definition: index.h:629
RZ_API void rz_analysis_block_set_size(RzAnalysisBlock *block, ut64 size)
Definition: block.c:194
static bool automerge_get_predecessors_cb(void *user, const ut64 k, const void *v)
Definition: block.c:871
RZ_API bool rz_analysis_block_was_modified(RzAnalysisBlock *block)
Definition: block.c:680
static bool recurse_list_cb(RzAnalysisBlock *block, void *user)
Definition: block.c:560
static RzAnalysisBlock * block_new(RzAnalysis *a, ut64 addr, ut64 size)
Definition: block.c:47
#define unwrap(rbnode)
Definition: block.c:10
static bool block_list_cb(RzAnalysisBlock *block, void *user)
Definition: block.c:126
RZ_API void rz_analysis_blocks_foreach_intersect(RzAnalysis *analysis, ut64 addr, ut64 size, RzAnalysisBlockCb cb, void *user)
Definition: block.c:161
static bool count_successors_cb(ut64 addr, void *user)
Definition: block.c:843
RZ_API void rz_analysis_delete_block(RzAnalysisBlock *bb)
Definition: block.c:186
RZ_API RzAnalysisBlock * rz_analysis_block_chop_noreturn(RzAnalysisBlock *block, ut64 addr)
Definition: block.c:766
RZ_API RzList * rz_analysis_block_recurse_list(RzAnalysisBlock *block)
Definition: block.c:567
static bool block_recurse_successor_cb(ut64 addr, void *user)
Definition: block.c:415
RZ_API bool rz_analysis_block_relocate(RzAnalysisBlock *block, ut64 addr, ut64 size)
Definition: block.c:213
static bool block_from_offset_cb(RzAnalysisBlock *block, void *user)
Definition: block.c:977
RZ_API bool rz_analysis_block_set_op_offset(RzAnalysisBlock *block, size_t i, ut16 v)
Definition: block.c:1027
RZ_API void rz_analysis_block_ref(RzAnalysisBlock *bb)
Definition: block.c:40
RZ_API void rz_analysis_block_update_hash(RzAnalysisBlock *block)
Definition: block.c:698
static bool all_in(RzAnalysisBlock *node, ut64 addr, RzAnalysisBlockCb cb, void *user)
Definition: block.c:96
RZ_API bool rz_analysis_block_recurse_depth_first(RzAnalysisBlock *block, RzAnalysisBlockCb cb, RZ_NULLABLE RzAnalysisBlockCb on_exit, void *user)
Definition: block.c:492
RZ_API bool rz_analysis_blocks_foreach_in(RzAnalysis *analysis, ut64 addr, RzAnalysisBlockCb cb, void *user)
Definition: block.c:122
static bool automerge_predecessor_successor_cb(ut64 addr, void *user)
Definition: block.c:849
static void block_free(RzAnalysisBlock *block)
Definition: block.c:70
RZ_API RzAnalysisBlock * rz_analysis_get_block_at(RzAnalysis *analysis, ut64 addr)
Definition: block.c:90
RZ_API bool rz_analysis_block_recurse_followthrough(RzAnalysisBlock *block, RzAnalysisBlockCb cb, void *user)
Definition: block.c:458
RZ_API ut64 rz_analysis_block_get_op_size(RzAnalysisBlock *bb, size_t i)
Definition: block.c:1070
RZ_API ut64 rz_analysis_block_get_op_addr(RzAnalysisBlock *block, size_t i)
Definition: block.c:1016
RZ_API void rz_analysis_block_unref(RzAnalysisBlock *bb)
Definition: block.c:370
RZ_API void rz_analysis_block_add_switch_case(RzAnalysisBlock *block, ut64 switch_addr, ut64 case_value, ut64 case_addr)
Definition: block.c:575
RZ_API RzList * rz_analysis_get_blocks_in(RzAnalysis *analysis, ut64 addr)
Definition: block.c:133
RZ_API RzAnalysisBlock * rz_analysis_create_block(RzAnalysis *analysis, ut64 addr, ut64 size)
Definition: block.c:174
RZ_API ut64 rz_analysis_block_get_op_addr_in(RzAnalysisBlock *bb, ut64 off)
Definition: block.c:1048
static void noreturn_successor_free(HtUPKv *kv)
Definition: block.c:720
RZ_API RzList * rz_analysis_get_blocks_intersect(RzAnalysis *analysis, ut64 addr, ut64 size)
Definition: block.c:165
static void __max_end(RBNode *node)
Definition: block.c:12
static bool noreturn_get_blocks_cb(void *user, const ut64 k, const void *v)
Definition: block.c:758
RZ_API bool rz_analysis_block_merge(RzAnalysisBlock *a, RzAnalysisBlock *b)
Definition: block.c:318
struct rz_analysis_block_recurse_context_t RzAnalysisBlockRecurseContext
RZ_API RzAnalysisBlock * rz_analysis_block_split(RzAnalysisBlock *bbi, ut64 addr)
Definition: block.c:255
RZ_API RzAnalysisBlock * rz_analysis_find_most_relevant_block_in(RzAnalysis *analysis, ut64 off)
Definition: block.c:997
RZ_API bool rz_analysis_block_recurse(RzAnalysisBlock *block, RzAnalysisBlockCb cb, void *user)
Definition: block.c:430
RZ_API bool rz_analysis_block_successor_addrs_foreach(RzAnalysisBlock *block, RzAnalysisAddrCb cb, void *user)
Definition: block.c:384
RZ_API RZ_NULLABLE RzList * rz_analysis_block_shortest_path(RzAnalysisBlock *block, ut64 dst)
Definition: block.c:622
RZ_API ut16 rz_analysis_block_get_op_offset(RzAnalysisBlock *block, size_t i)
Definition: block.c:1006
static void all_intersect(RzAnalysisBlock *node, ut64 addr, ut64 size, RzAnalysisBlockCb cb, void *user)
Definition: block.c:141
RZ_API void rz_analysis_block_analyze_ops(RzAnalysisBlock *block)
Definition: block.c:1084
void __block_free_rb(RBNode *node, void *user)
Definition: block.c:85
RZ_API bool rz_analysis_block_op_starts_at(RzAnalysisBlock *bb, ut64 addr)
Definition: block.c:582
#define DFLT_NINSTR
Definition: block.c:45
static bool noreturn_successors_cb(RzAnalysisBlock *block, void *user)
Definition: block.c:726
RZ_API void rz_analysis_block_automerge(RzList *blocks)
Definition: block.c:892
static bool shortest_path_successor_cb(ut64 addr, void *user)
Definition: block.c:608
static bool noreturn_remove_unreachable_cb(void *user, const ut64 k, const void *v)
Definition: block.c:748
#define CB_ADDR(addr)
static bool noreturn_successors_reachable_cb(RzAnalysisBlock *block, void *user)
Definition: block.c:739
static int __bb_addr_cmp(const void *incoming, const RBNode *in_tree, void *user)
Definition: block.c:26
RZ_API void rz_analysis_cond_free(RzAnalysisCond *c)
Definition: cond.c:19
#define RZ_API
#define INT_MAX
Definition: cp-demangle.c:131
#define NULL
Definition: cris-opc.c:27
RzCryptoSelector bit
Definition: crypto.c:16
static static fork const void static count static fd const char const char static newpath const char static path const char path
Definition: sflib.h:35
uint16_t ut16
uint32_t ut32
const char * k
Definition: dsignal.c:11
const char * v
Definition: dsignal.c:12
RZ_API ut32 rz_hash_xxhash(RZ_NONNULL RzHash *rh, RZ_NONNULL const ut8 *input, size_t size)
Definition: hash.c:27
RZ_API void Ht_() free(HtName_(Ht) *ht)
Definition: ht_inc.c:130
RZ_API const KEY_TYPE bool * found
Definition: ht_inc.h:130
voidpf void uLong size
Definition: ioapi.h:138
voidpf uLong offset
Definition: ioapi.h:144
voidpf void * buf
Definition: ioapi.h:138
uint8_t ut8
Definition: lh5801.h:11
static void list(RzEgg *egg)
Definition: rz-gg.c:52
RZ_API RZ_BORROW RzListIter * rz_list_iter_get_next(RzListIter *list)
returns the next RzList iterator in the list
Definition: list.c:20
RZ_API RZ_BORROW RzListIter * rz_list_contains(RZ_NONNULL const RzList *list, RZ_NONNULL const void *ptr)
Returns the RzListIter of the given pointer, if found.
Definition: list.c:592
RZ_API RZ_BORROW RzListIter * rz_list_prepend(RZ_NONNULL RzList *list, void *data)
Appends at the beginning of the list a new element.
Definition: list.c:316
RZ_API RZ_OWN RzList * rz_list_newf(RzListFree f)
Returns a new initialized RzList pointer and sets the free method.
Definition: list.c:248
RZ_API RZ_OWN RzList * rz_list_clone(RZ_NONNULL const RzList *list)
Shallow copies of the list (but doesn't free its elements)
Definition: list.c:496
RZ_API void * rz_list_iter_get_data(RzListIter *list)
returns the value stored in the list element
Definition: list.c:42
RZ_API RZ_OWN RzList * rz_list_new(void)
Returns a new initialized RzList pointer (free method is not initialized)
Definition: list.c:235
RZ_API RZ_BORROW void * rz_list_first(RZ_NONNULL const RzList *list)
Returns the first element of the list.
Definition: list.c:77
RZ_API RZ_BORROW RzListIter * rz_list_push(RZ_NONNULL RzList *list, void *item)
Alias for rz_list_append.
Definition: list.c:60
RZ_API ut32 rz_list_length(RZ_NONNULL const RzList *list)
Returns the length of the list.
Definition: list.c:109
RZ_API void rz_list_split_iter(RZ_NONNULL RzList *list, RZ_NONNULL RzListIter *iter)
Definition: list.c:187
RZ_API void rz_list_init(RZ_NONNULL RzList *list)
Initializes the RzList pointer.
Definition: list.c:95
RZ_API void rz_list_free(RZ_NONNULL RzList *list)
Empties the list and frees the list pointer.
Definition: list.c:137
RZ_API void rz_list_purge(RZ_NONNULL RzList *list)
Empties the list without freeing the list pointer.
Definition: list.c:120
void * realloc(void *ptr, size_t size)
Definition: malloc.c:144
void * malloc(size_t size)
Definition: malloc.c:123
char * dst
Definition: lz4.h:724
assert(limit<=UINT32_MAX/2)
RZ_API bool rz_analysis_op_fini(RzAnalysisOp *op)
Definition: op.c:37
RZ_API int rz_analysis_op(RzAnalysis *analysis, RzAnalysisOp *op, ut64 addr, const ut8 *data, int len, RzAnalysisOpMask mask)
Definition: op.c:96
int off
Definition: pal.c:13
#define rz_return_if_fail(expr)
Definition: rz_assert.h:100
#define rz_return_val_if_fail(expr, val)
Definition: rz_assert.h:108
void(* RzListFree)(void *ptr)
Definition: rz_list.h:11
#define RZ_LOG_DEBUG(fmtstr,...)
Definition: rz_log.h:49
RZ_API RBNode * rz_rbtree_find(RBNode *root, void *data, RBComparator cmp, void *user)
Definition: rbtree.c:267
RZ_API bool rz_rbtree_aug_update_sum(RBNode *root, void *data, RBNode *node, RBComparator cmp, void *cmp_user, RBNodeSum sum)
Returns true if the sum has been updated, false if node has not been found.
Definition: rbtree.c:235
RZ_API bool rz_rbtree_aug_delete(RBNode **root, void *data, RBComparator cmp, void *cmp_user, RBNodeFree freefn, void *free_user, RBNodeSum sum)
Returns true if a node with an equal key is deleted.
Definition: rbtree.c:67
RZ_API bool rz_rbtree_aug_insert(RBNode **root, void *data, RBNode *node, RBComparator cmp, void *cmp_user, RBNodeSum sum)
Returns true if the node was inserted successfully.
Definition: rbtree.c:163
#define RZ_NULLABLE
Definition: rz_types.h:65
#define RZ_NEW0(x)
Definition: rz_types.h:284
#define RZ_NEWS0(x, y)
Definition: rz_types.h:282
#define PFMT64x
Definition: rz_types.h:393
#define container_of(ptr, type, member)
Definition: rz_types.h:650
#define UT64_MAX
Definition: rz_types_base.h:86
#define UT16_MAX
RZ_API void rz_vector_pop(RzVector *vec, void *into)
Definition: vector.c:184
static void * rz_vector_index_ptr(RzVector *vec, size_t index)
Definition: rz_vector.h:88
RZ_API void * rz_pvector_pop(RzPVector *vec)
Definition: vector.c:372
RZ_API void rz_pvector_init(RzPVector *vec, RzPVectorFree free)
Definition: vector.c:298
RZ_API void * rz_vector_push(RzVector *vec, void *x)
Definition: vector.c:197
static bool rz_pvector_empty(RzPVector *vec)
Definition: rz_vector.h:246
static void ** rz_pvector_push(RzPVector *vec, void *x)
Definition: rz_vector.h:300
RZ_API void rz_vector_clear(RzVector *vec)
Definition: vector.c:68
RZ_API void rz_vector_init(RzVector *vec, size_t elem_size, RzVectorFree free, void *free_user)
Definition: vector.c:33
static bool rz_vector_empty(const RzVector *vec)
Definition: rz_vector.h:74
RZ_API void rz_pvector_clear(RzPVector *vec)
Definition: vector.c:326
#define rz_pvector_foreach(vec, it)
Definition: rz_vector.h:334
int size_t
Definition: sftypes.h:40
#define b(i)
Definition: sha256.c:42
#define a(i)
Definition: sha256.c:41
RzAnalysisBlock * cur_pred
Definition: block.c:839
HtUP * blocks
Definition: block.c:837
HtUP * predecessors
Definition: block.c:835
size_t cur_succ_count
Definition: block.c:840
HtUP * visited_blocks
Definition: block.c:836
RzAnalysisBlock * ret
Definition: block.c:974
bool reachable
Definition: block.c:717
RzAnalysisBlock * block
Definition: block.c:716
ut64 dst
Definition: block.c:603
RzAnalysisBlock * cur_parent
Definition: block.c:602
RzPVector * next_visit
Definition: block.c:604
HtUP * visited
Definition: block.c:605
RzAnalysis * analysis
Definition: block.c:601
RzAnalysisBlock * bb
Definition: block.c:488
RzListIter * switch_it
Definition: block.c:489
Definition: zipcmp.c:77
ut8 * parent_reg_arena
Definition: rz_analysis.h:877
RzAnalysis * analysis
Definition: rz_analysis.h:887
RzAnalysisCond * cond
Definition: rz_analysis.h:873
RzAnalysisSwitchOp * switch_op
Definition: rz_analysis.h:874
RzAnalysisDiff * diff
Definition: rz_analysis.h:872
RzAnalysisFcnMeta meta
Definition: rz_analysis.h:265
RBTree bb_tree
Definition: rz_analysis.h:564
RzHash * hash
Definition: rz_analysis.h:626
RzIOBind iob
Definition: rz_analysis.h:574
RzIOReadAt read_at
Definition: rz_io.h:240
RzIO * io
Definition: rz_io.h:232
struct rz_list_iter_t * n
Definition: rz_list.h:15
void * data
Definition: rz_list.h:14
RzListIter * head
Definition: rz_list.h:19
RzListFree free
Definition: rz_list.h:21
struct rz_rb_node_t * child[2]
Definition: rz_rbtree.h:20
void on_exit(uv_process_t *req, int64_t exit_status, int term_signal)
Definition: main.c:11
uint64_t blocks
Definition: list.c:104
RZ_API void rz_analysis_switch_op_free(RzAnalysisSwitchOp *swop)
Definition: switch.c:42
RZ_API RzAnalysisSwitchOp * rz_analysis_switch_op_new(ut64 addr, ut64 min_val, ut64 max_val, ut64 def_val)
Definition: switch.c:21
RZ_API RzAnalysisCaseOp * rz_analysis_switch_op_add_case(RzAnalysisSwitchOp *swop, ut64 addr, ut64 value, ut64 jump)
Definition: switch.c:49
Definition: dis.c:32
static st64 delta
Definition: vmenus.c:2425
static bool breaked
ut64(WINAPI *w32_GetEnabledXStateFeatures)()
static const char * cb[]
Definition: z80_tab.h:176
static int addr
Definition: z80asm.c:58