#include <mach/mach_host.h>
#include <mach/host_priv.h>
#include <mach/mach_vm.h>
#include <mach/thread_status.h>
#include <mach/vm_statistics.h>
#include <TargetConditionals.h>
#define MAX_MACH_HEADER_SIZE (64 * 1024)
#define DYLD_INFO_COUNT 5
#define DYLD_INFO_LEGACY_COUNT 1
#define DYLD_INFO_32_COUNT 3
#define DYLD_INFO_64_COUNT 5
#define DYLD_IMAGE_INFO_32_SIZE 12
#define DYLD_IMAGE_INFO_64_SIZE 24
#define DEBUG_MAP_TAG_ID 239
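/*
 * Illustrative sketch (not part of the original file): the DYLD_IMAGE_INFO_*
 * sizes above match the raw per-image record dyld keeps in the debuggee,
 * three pointer-sized fields read as 32-bit or 64-bit words. The struct name
 * below is hypothetical and only shows where the 12/24 byte figures come from.
 */
#include <stdint.h>
typedef struct {
	uint64_t load_address; /* where the Mach-O image is mapped */
	uint64_t file_path; /* remote pointer to the image path string */
	uint64_t mod_date; /* file modification timestamp */
} example_dyld_image_info_64; /* 3 * 8 == DYLD_IMAGE_INFO_64_SIZE (24); the 32-bit variant is 3 * 4 == 12 */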
host_t myhost = mach_host_self();
mach_port_t psDefault = 0;
mach_port_t psDefault_control = 0;
task_array_t tasks = NULL;
mach_msg_type_number_t numTasks = 0;
kern_return_t kr = processor_set_default(myhost, &psDefault);
if (kr != KERN_SUCCESS) {
kr = host_processor_set_priv(myhost, psDefault, &psDefault_control);
if (kr != KERN_SUCCESS) {
	eprintf("host_processor_set_priv failed with error 0x%x\n", kr);
kr = processor_set_tasks(psDefault_control, &tasks, &numTasks);
if (kr != KERN_SUCCESS) {
	eprintf("processor_set_tasks failed with error %x\n", kr);
for (i = 0; i < numTasks; i++) {
	pid_for_task(i, &pid);
vm_deallocate(myhost, (vm_address_t)tasks, numTasks * sizeof(task_t));
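/*
 * Minimal sketch of the same processor-set walk, assuming the caller runs as
 * root so host_processor_set_priv() succeeds. The helper name is hypothetical;
 * unlike the excerpt above it indexes the returned task array directly.
 */
static task_t example_task_for_pid_via_pset(int target_pid) {
	host_t host = mach_host_self();
	mach_port_t pset_name = 0, pset_priv = 0;
	task_array_t task_list = NULL;
	mach_msg_type_number_t task_count = 0, i;
	task_t found = MACH_PORT_NULL;
	if (processor_set_default(host, &pset_name) != KERN_SUCCESS ||
		host_processor_set_priv(host, pset_name, &pset_priv) != KERN_SUCCESS ||
		processor_set_tasks(pset_priv, &task_list, &task_count) != KERN_SUCCESS) {
		return MACH_PORT_NULL;
	}
	for (i = 0; i < task_count; i++) {
		int pid = -1;
		pid_for_task(task_list[i], &pid); /* map each task port back to its pid */
		if (pid == target_pid && found == MACH_PORT_NULL) {
			found = task_list[i];
		} else {
			mach_port_deallocate(mach_task_self(), task_list[i]);
		}
	}
	vm_deallocate(mach_task_self(), (vm_address_t)task_list, task_count * sizeof(task_t));
	return found;
}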
task_t task = MACH_PORT_NULL;
host_get_special_port(mach_host_self(), HOST_LOCAL_NODE, 4, &task);
eprintf("step failed on task %d for pid %d\n", task, dbg->tid);
eprintf("xnu_step modify_trace_bit error\n");
RZ_LOG_ERROR("Failed to start listening to mach exceptions");
int r = rz_debug_ptrace(dbg, PT_ATTACHEXC, pid, 0, 0);
perror("ptrace(PT_ATTACHEXC)");
perror("ptrace(PT_DETACH)");
kern_return_t kr = mach_port_deallocate(mach_task_self(), task_dbg);
if (kr != KERN_SUCCESS) {
	eprintf("xnu_detach: failed to deallocate port\n");
struct task_basic_info info;
mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
kr = task_info(task, TASK_BASIC_INFO, (task_info_t)&info, &count);
if (kr != KERN_SUCCESS) {
	eprintf("failed to get task info\n");
return info.suspend_count;
if (suspend_count == -1) {
if (suspend_count == 1) {
if (suspend_count > 1) {
kern_return_t kr = task_suspend(task);
if (kr != KERN_SUCCESS) {
	eprintf("failed to suspend task\n");
if (suspend_count != 1) {
eprintf("failed to get thread in xnu_continue\n");
eprintf("error clearing trace bit in xnu_continue\n");
kern_return_t kr = task_resume(task);
if (kr != KERN_SUCCESS) {
	eprintf("xnu_continue: Warning: Failed to resume task\n");
#if __i386__ || __x86_64__
#elif __APPLE__ && (__aarch64__ || __arm64__ || __arm__)
#error "Unsupported Apple architecture"
#elif __arm64 || __aarch64
#elif __arm || __armv7 || __arm__ || __armv7__
#warning TODO powerpc support here
bool anywhere = !VM_FLAGS_ANYWHERE;
anywhere = VM_FLAGS_ANYWHERE;
ret = vm_allocate(th->port, (vm_address_t *)&base, (vm_size_t)size, anywhere);
if (ret != KERN_SUCCESS) {
	eprintf("vm_allocate failed\n");
mach_error_t ret = vm_deallocate(th->port, (vm_address_t)addr, (vm_size_t)size);
if (ret != KERN_SUCCESS) {
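/*
 * Sketch of allocating and releasing memory in a debuggee task via its task
 * port, assuming `task` is a valid send right (e.g. from task_for_pid()).
 * Passing VM_FLAGS_ANYWHERE lets the kernel pick the address.
 */
static kern_return_t example_remote_alloc(task_t task, vm_address_t *base, vm_size_t size) {
	kern_return_t kr = vm_allocate(task, base, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... use the region, then release it ... */
	return vm_deallocate(task, *base, size);
}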
int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 };
size_t kpl = sizeof(struct kinfo_proc);
if (sysctl(mib, len, kp, &kpl, NULL, 0) == -1) {
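/*
 * Sketch of the kinfo_proc lookup used above, assuming <sys/sysctl.h> and
 * <sys/types.h> are included. Returns 0 on success and fills *kp for `pid`.
 */
static int example_get_kinfo_proc(pid_t pid, struct kinfo_proc *kp) {
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	size_t len = sizeof(*kp);
	if (sysctl(mib, 4, kp, &len, NULL, 0) == -1 || len == 0) {
		return -1; /* process not found or sysctl failure */
	}
	return 0;
}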
struct kinfo_proc kp;
int kinfo_proc_error = 0;
if (kinfo_proc_error) {
	eprintf("Error while querying process info via sysctl\n");
rdi->uid = kp.kp_eproc.e_ucred.cr_uid;
rdi->gid = kp.kp_eproc.e_ucred.cr_gid;
struct proc_bsdinfo proc;
char file_path[MAXPATHLEN] = { 0 };
file_path_len = proc_pidpath(rdi->pid, file_path, sizeof(file_path));
if (file_path_len > 0) {
	file_path[file_path_len] = 0;
if (proc_pidinfo(rdi->pid, PROC_PIDTBSDINFO, 0, &proc, PROC_PIDTBSDINFO_SIZE) == PROC_PIDTBSDINFO_SIZE) {
	if ((proc.pbi_flags & PROC_FLAG_TRACED) != 0) {
	if ((proc.pbi_flags & PROC_FLAG_INEXIT) != 0) {
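/*
 * Sketch of the libproc queries above, assuming <libproc.h> and
 * <sys/param.h>: proc_pidpath() fetches the executable path and
 * PROC_PIDTBSDINFO exposes state flags such as PROC_FLAG_TRACED
 * (process is currently being traced).
 */
static int example_is_traced(pid_t pid) {
	struct proc_bsdinfo bsd_info;
	char path[MAXPATHLEN] = { 0 };
	if (proc_pidpath(pid, path, sizeof(path)) <= 0) {
		path[0] = 0; /* the path is informational only; keep going */
	}
	if (proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &bsd_info, PROC_PIDTBSDINFO_SIZE) != PROC_PIDTBSDINFO_SIZE) {
		return -1;
	}
	return (bsd_info.pbi_flags & PROC_FLAG_TRACED) ? 1 : 0;
}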
#if __arm__ || __arm64__ || __aarch64__
#define CPU_PC (dbg->bits == RZ_SYS_BITS_64) ? state.ts_64.__pc : state.ts_32.__pc
#define CPU_PC state.srr0
#elif __x86_64__ || __i386__
#define CPU_PC (dbg->bits == RZ_SYS_BITS_64) ? state.uts.ts64.__rip : state.uts.ts32.__eip
eprintf("Failed to get GPR registers in xnu_thread_list\n");
static vm_prot_t unix_prot_to_darwin(int prot) {
#define xwrz_testwx(x) (((x & 1) << 2) | (x & 2) | ((x & 4) >> 2))
mach_error_t ret = mach_vm_protect(task, (vm_address_t)addr, (vm_size_t)size, (boolean_t)0, xnu_perms);
if (ret != KERN_SUCCESS) {
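/*
 * Sketch of the permission translation feeding mach_vm_protect(), written out
 * with explicit PROT_* checks instead of the bit-swapping macro above
 * (assumes <sys/mman.h>, <mach/mach.h> and <mach/mach_vm.h>).
 */
static kern_return_t example_protect(task_t task, mach_vm_address_t addr, mach_vm_size_t size, int prot) {
	vm_prot_t vmprot = VM_PROT_NONE;
	if (prot & PROT_READ) {
		vmprot |= VM_PROT_READ;
	}
	if (prot & PROT_WRITE) {
		vmprot |= VM_PROT_WRITE;
	}
	if (prot & PROT_EXEC) {
		vmprot |= VM_PROT_EXECUTE;
	}
	/* set_maximum == 0: change only the current protection, not the maximum */
	return mach_vm_protect(task, addr, size, (boolean_t)0, vmprot);
}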
static int old_pid = -1;
kr = mach_port_deallocate(mach_task_self(), task_dbg);
if (kr != KERN_SUCCESS) {
	eprintf("pid_to_task: failed to deallocate port\n");
err = task_for_pid(mach_task_self(), (pid_t)pid, &task);
if ((err != KERN_SUCCESS) || !MACH_PORT_VALID(task)) {
if (task != MACH_PORT_NULL) {
	(int)task, (int)pid);
eprintf("You probably need to run as root\n");
kern_return_t kr = KERN_SUCCESS;
vm_address_t address = 0;
mach_msg_type_number_t count;
struct vm_region_submap_info_64 info;
count = VM_REGION_SUBMAP_INFO_COUNT_64;
kr = vm_region_recurse_64(task, &address, &size, &nesting_depth, (vm_region_info_64_t)&info, &count);
if (kr == KERN_INVALID_ADDRESS) {
mach_error("vm_region:", kr);
if (info.is_submap) {
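/*
 * Sketch of counting the target's VM map entries by walking regions with
 * vm_region_recurse_64(): submaps are descended into by bumping the nesting
 * depth, everything else counts as one entry and advances the address.
 */
static int example_vmmap_entries(task_t task) {
	vm_address_t address = 0;
	vm_size_t size = 0;
	natural_t depth = 0;
	int entries = 0;
	for (;;) {
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kern_return_t kr = vm_region_recurse_64(task, &address, &size, &depth,
			(vm_region_recurse_info_t)&info, &count);
		if (kr != KERN_SUCCESS) {
			break; /* KERN_INVALID_ADDRESS marks the end of the map */
		}
		if (info.is_submap) {
			depth++; /* recurse into the submap at the same address */
			continue;
		}
		entries++;
		address += size;
	}
	return entries;
}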
#define xwrz_testwx(x) (((x & 1) << 2) | (x & 2) | ((x & 4) >> 2))
#define COMMAND_SIZE(segment_count, segment_command_sz, thread_count, tstate_size) \
	((segment_count) * (segment_command_sz) + (thread_count) * sizeof(struct thread_command) + (tstate_size) * (thread_count))
size_t *segment_command_sz) {
#if __ppc64__ || __x86_64__
#elif __i386__ || __ppc__ || __POWERPC__
int mib[CTL_MAXNAME];
size_t len = CTL_MAXNAME;
size_t cpu_type_len = sizeof(cpu_type_t);
if (sysctlnametomib("sysctl.proc_cputype", mib, &len) == -1) {
	perror("sysctlnametomib");
if (cpu_type_len > 0)
cpu_subtype_t subtype;
size = sizeof(cpu_subtype_t);
sysctlbyname("hw.cpusubtype", &subtype, &size, NULL, 0);
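/*
 * Sketch of the two sysctl lookups above: "sysctl.proc_cputype" resolves the
 * CPU type of an arbitrary pid (the pid is appended to the resolved MIB),
 * while "hw.cpusubtype" reports the host's CPU subtype.
 */
static cpu_type_t example_cpu_type_for_pid(pid_t pid) {
	int mib[CTL_MAXNAME];
	size_t miblen = CTL_MAXNAME;
	cpu_type_t cpu_type = 0;
	size_t cpu_type_len = sizeof(cpu_type);
	if (sysctlnametomib("sysctl.proc_cputype", mib, &miblen) == -1) {
		return 0;
	}
	mib[miblen] = pid; /* the resolved node takes the pid as its last element */
	if (sysctl(mib, (u_int)miblen + 1, &cpu_type, &cpu_type_len, NULL, 0) == -1) {
		return 0;
	}
	return cpu_type;
}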
int segment_count, int thread_count, int command_size, pid_t pid) {
#if __ppc64__ || __x86_64__ || (defined(TARGET_OS_MAC) && defined(__aarch64__))
mh64->ncmds = segment_count + thread_count;
#elif __i386__ || __ppc__ || __POWERPC__
mh->ncmds = segment_count + thread_count;
mach_msg_type_number_t thread_count;
thread_array_t thread_list;
mach_error_t kr = task_threads(task_dbg, &thread_list, &thread_count);
if (kr != KERN_SUCCESS) {
rz_list_foreach_safe (threads, iter, iter2, thread) {
	mach_port_deallocate(mach_task_self(), thread->port);
vm_deallocate(mach_task_self(), (vm_address_t)thread_list, thread_count * sizeof(thread_act_t));
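/*
 * Sketch of the thread-port lifecycle above: task_threads() returns an array
 * of thread send rights plus the out-of-line array itself, so each port must
 * be deallocated and the array released with vm_deallocate().
 */
static int example_count_threads(task_t task) {
	thread_act_array_t list = NULL;
	mach_msg_type_number_t count = 0, i;
	if (task_threads(task, &list, &count) != KERN_SUCCESS) {
		return -1;
	}
	for (i = 0; i < count; i++) {
		mach_port_deallocate(mach_task_self(), list[i]);
	}
	vm_deallocate(mach_task_self(), (vm_address_t)list, count * sizeof(thread_act_t));
	return (int)count;
}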
vm_offset_t header, int header_end, int segment_command_sz, int *hoffset_out) {
int hoffset = header_end;
kern_return_t kr = KERN_SUCCESS;
#define CAST_DOWN(type, addr) (((type)((uintptr_t)(addr))))
#if __ppc64__ || __x86_64__ || (defined(TARGET_OS_MAC) && defined(__aarch64__))
#elif __i386__ || __ppc__ || __POWERPC__
rz_list_foreach_safe (mem_maps, iter, iter2, curr_map) {
vm_map_offset_t vmoffset = curr_map->addr;
#if __ppc64__ || __x86_64__ || (defined(TARGET_OS_MAC) && defined(__aarch64__))
#elif __i386__ || __ppc__
(vbr.user_tag != VM_MEMORY_IOKIT)) {
vm_map_size_t tmp_size = curr_map->size;
while (tmp_size > 0) {
	vm_map_size_t xfer_size = tmp_size;
	vm_offset_t local_address;
	mach_msg_type_number_t local_size;
	kr = mach_vm_read(task_dbg, vmoffset, xfer_size, &local_address, &local_size);
	if ((kr != KERN_SUCCESS) || (xfer_size != local_size)) {
		eprintf("Failed to read target memory\n");
		eprintf("[DEBUG] kr = %d\n", kr);
		eprintf("[DEBUG] KERN_SUCCESS = %d\n", KERN_SUCCESS);
		eprintf("[DEBUG] local_size = %d\n", local_size);
#if __ppc64__ || __x86_64__ || __aarch64__ || __arm64__
	eprintf("Failed to write to the destination\n");
	tmp_size -= xfer_size;
hoffset += segment_command_sz;
foffset += curr_map->size;
vmoffset += curr_map->size;
*hoffset_out = hoffset;
thread_state_t tstate, mach_msg_type_number_t *count) {
return thread_get_state(thread, flavor, tstate, count);
for (i = 0; i < coredump_nflavors; i++) {
	eprintf("[DEBUG] %d/%d\n", i + 1, coredump_nflavors);
hoffset += flavors[i].count * sizeof(int);
#define CORE_ALL_SECT 0
#include <sys/sysctl.h>
size_t procBufferSize = sizeof(process);
int path[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
const int pathLength = (sizeof(path) / sizeof(int));
int sysctlResult = sysctl(path, pathLength, &process, &procBufferSize, NULL, 0);
if ((sysctlResult == 0) && (procBufferSize != 0)) {
	uid = process.kp_eproc.e_ucred.cr_uid;
size_t mach_header_sz;
size_t segment_command_sz;
(void)task_suspend(task);
memcpy(thread_flavor_array, &flavors, sizeof(thread_flavor_array));
for (i = 0; i < coredump_nflavors; i++) {
(flavors[i].count * sizeof(int));
command_size = COMMAND_SIZE(segment_count, segment_command_sz, thread_count, tstate_size);
header_size = command_size + mach_header_sz;
perror("There are no loaded maps");
header, mach_header_sz, segment_command_sz, &hoffset) < 0) {
eprintf("There was an error while writing the memory maps");
rz_list_foreach_safe (threads_list, iter, iter2, thread) {
padding_sz = round_page(header_size) - header_size;
free((void *)padding);
int psnamelen, foo, nargs, mib[3], uid;
size_t size, argmax = 4096;
char *curr_arg, *start_args, *iter_args, *end_args;
char *procargs = NULL;
mib[1] = KERN_ARGMAX;
size = sizeof(argmax);
if (sysctl(mib, 2, &argmax, &size, NULL, 0) == -1) {
	eprintf("sysctl() error on getting argmax\n");
procargs = (char *)malloc(argmax);
	eprintf("getcmdargs(): insufficient memory for procargs %d\n", (int)(size_t)argmax);
mib[1] = KERN_PROCARGS2;
if (sysctl(mib, 3, procargs, &size, NULL, 0) == -1) {
	eprintf("sysctl(): unspecified sysctl error - %i\n", errno);
iter_args = procargs + sizeof(nargs);
end_args = &procargs[size - 30];
if (iter_args >= end_args) {
	eprintf("getcmdargs(): argument length mismatch");
while (iter_args < end_args && *iter_args != '\0') { iter_args++; }
while (iter_args < end_args && *iter_args == '\0') { iter_args++; }
if (iter_args == end_args) {
curr_arg = iter_args;
start_args = iter_args;
while (iter_args < end_args && nargs > 0) {
	if (*iter_args++ == '\0') {
		int alen = strlen(curr_arg);
		memcpy(psname, curr_arg, alen + 1);
		psname[psnamelen] = ' ';
		memcpy(psname + psnamelen + 1, curr_arg, alen + 1);
		curr_arg = iter_args;
if (curr_arg == start_args || nargs > 0) {
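/*
 * Sketch of the KERN_PROCARGS2 layout being parsed above, assuming
 * <sys/sysctl.h>, <stdio.h>, <stdlib.h>: the buffer starts with a 32-bit
 * argc, followed by the executable path, NUL padding, and then the argv
 * strings separated by NULs. The helper below (name is hypothetical) only
 * extracts argv[0] into `out`.
 */
static int example_get_argv0(pid_t pid, char *out, size_t outlen) {
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, pid };
	int argmax_mib[2] = { CTL_KERN, KERN_ARGMAX };
	int argmax = 0;
	size_t size = sizeof(argmax);
	if (sysctl(argmax_mib, 2, &argmax, &size, NULL, 0) == -1) {
		return -1;
	}
	char *buf = malloc(argmax);
	if (!buf) {
		return -1;
	}
	size = argmax;
	if (sysctl(mib, 3, buf, &size, NULL, 0) == -1) {
		free(buf);
		return -1;
	}
	char *p = buf + sizeof(int); /* skip argc */
	char *end = buf + size;
	while (p < end && *p != '\0') { p++; } /* skip the exec path */
	while (p < end && *p == '\0') { p++; } /* skip the NUL padding */
	if (p < end) {
		snprintf(out, outlen, "%s", p); /* argv[0] */
	}
	free(buf);
	return (p < end) ? 0 : -1;
}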
vm_map_t target_task,
mach_vm_address_t *address,
mach_vm_size_t *size,
natural_t *nesting_depth,
vm_region_recurse_info_t info,
mach_msg_type_number_t *infoCnt);
case VM_INHERIT_SHARE:
	return "share";
case VM_INHERIT_COPY:
	return "copy";
case VM_INHERIT_NONE:
	return "none";
default:
	return "???";
#ifndef KERNEL_LOWER
#define HEADER_SIZE 0x1000
#define IMAGE_OFFSET 0x201000
#define KERNEL_LOWER 0x80000000
mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
vm_region_submap_info_data_64_t info;
unsigned int depth = 0;
ret = task_for_pid(mach_task_self(), 0, &task);
if (ret != KERN_SUCCESS) {
ret = vm_region_recurse_64(task, (vm_address_t *)&naddr, (vm_size_t *)&size, &depth, (vm_region_info_t)&info, &info_count);
if (ret != KERN_SUCCESS) {
if (addr == naddr) {
if (size > 1024 * 1024 * 1024) {
ret = mach_port_deallocate(mach_task_self(), 0);
if (ret != KERN_SUCCESS) {
	eprintf("get_kernel_base: leaking kernel port\n");
return (vm_address_t)0;
#warning TODO: xnu_dbg_modules not supported
struct task_dyld_info info;
mach_msg_type_number_t count;
int size, info_array_count, info_array_size, i;
ut64 info_array_address;
void *info_array = NULL;
char file_path[MAXPATHLEN] = { 0 };
count = TASK_DYLD_INFO_COUNT;
kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&info, &count);
if (kr != KERN_SUCCESS) {
if (info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_64) {
if (info_array_address == 0) {
info_array_size = RZ_ABS(info_array_size);
info_array = calloc(1, info_array_size);
	eprintf("Cannot allocate info_array_size %d\n", info_array_size);
for (i = 0; i < info_array_count; i++) {
	if (info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_64) {
		file_path_address = info->image_file_path;
	file_path_address = info->image_file_path;
memset(file_path, 0, MAXPATHLEN);
	(ut8 *)file_path, MAXPATHLEN - 1);
eprintf("Cannot create rz_debug_map_new\n");
static int cmp(const void *_a, const void *_b) {
if (a->addr > b->addr) {
if (a->addr < b->addr) {
char module_name[MAXPATHLEN];
mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
mach_vm_size_t size = (mach_vm_size_t)0;
mach_vm_size_t osize = (mach_vm_size_t)0;
natural_t depth = 0;
#if __arm64__ || __aarch64__
size = osize = 16384;
size = osize = 4096;
struct vm_region_submap_info_64 info = { 0 };
mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
	(vm_region_recurse_info_t)&info, &info_count);
if (kr != KERN_SUCCESS) {
if (info.is_submap) {
	sizeof(module_name));
module_name[ret] = 0;
snprintf(depthstr, sizeof(depthstr), "_%d", depth);
if (info.max_protection != info.protection) {
	info.user_tag ? "_user" : "",
	info.is_submap ? "_sub" : "",
	"", info.is_submap ? "_submap" : "",
	module_name, maxperm, depthstr);
eprintf("Cannot create rz_debug_map_new\n");
if (!strcmp(mr->file, mr->file)) {
	strcpy(mr->name, slash + 1);
strcpy(m2->name, slash + 1);
RZ_API RzDebugMap * rz_debug_map_get(RzDebug *dbg, ut64 addr)
RZ_API bool rz_debug_map_sync(RzDebug *dbg)
RZ_API RzDebugMap * rz_debug_map_new(char *name, ut64 addr, ut64 addr_end, int perm, int user)
RZ_API RZ_OWN RzList * rz_list_newf(RzListFree f)
Returns a new initialized RzList pointer and sets the free method.
RZ_API RZ_OWN RzList * rz_list_new(void)
Returns a new initialized RzList pointer (free method is not initialized)
RZ_API void rz_list_sort(RZ_NONNULL RzList *list, RZ_NONNULL RzListComparator cmp)
Sorts via merge sort or via insertion sort a list.
RZ_API ut32 rz_list_length(RZ_NONNULL const RzList *list)
Returns the length of the list.
RZ_API RZ_BORROW RzListIter * rz_list_append(RZ_NONNULL RzList *list, void *data)
Appends at the end of the list a new element.
RZ_API void rz_list_free(RZ_NONNULL RzList *list)
Empties the list and frees the list pointer.
RZ_API RzDebugPid * rz_debug_pid_free(RzDebugPid *pid)
RZ_API RzDebugPid * rz_debug_pid_new(const char *path, int pid, int uid, char status, ut64 pc)
RZ_API bool rz_buf_append_bytes(RZ_NONNULL RzBuffer *b, RZ_NONNULL const ut8 *buf, ut64 len)
Append an array of bytes to the buffer.
RZ_API void rz_buf_free(RzBuffer *b)
Free all internal data hold by the buffer and the buffer.
RZ_API bool rz_buf_append_buf(RZ_NONNULL RzBuffer *b, RZ_NONNULL RzBuffer *a)
Append the content of the buffer a to the buffer b.
RZ_API RZ_OWN RzBuffer * rz_buf_new_with_bytes(RZ_NULLABLE RZ_BORROW const ut8 *bytes, ut64 len)
Creates a new buffer with a bytes array.
void(* RzListFree)(void *ptr)
#define RZ_LOG_ERROR(fmtstr,...)
RZ_API const char * rz_str_lchr(const char *str, char chr)
RZ_API const char * rz_str_rwx_i(int rwx)
static const char * rz_str_get_null(const char *str)
#define RZ_BETWEEN(x, y, z)
static const char * unparse_inheritance(vm_inherit_t i)
char * xnu_reg_profile(RzDebug *dbg)
bool xnu_step(RzDebug *dbg)
int xnu_map_dealloc(RzDebug *dbg, ut64 addr, int size)
static RzDebugMap * moduleAt(RzList *list, ut64 addr)
int xnu_continue(RzDebug *dbg, int pid, int tid, int sig)
static task_t task_for_pid_workaround(int Pid)
RzDebugMap * xnu_map_alloc(RzDebug *dbg, ut64 addr, int size)
static int cmp(const void *_a, const void *_b)
bool xnu_generate_corefile(RzDebug *dbg, RzBuffer *dest)
static int xnu_write_mem_maps_to_buffer(RzBuffer *buffer, RzList *mem_maps, int start_offset, vm_offset_t header, int header_end, int segment_command_sz, int *hoffset_out)
int xnu_map_protect(RzDebug *dbg, ut64 addr, int size, int perms)
int proc_regionfilename(int pid, uint64_t address, void *buffer, uint32_t buffersize)
RzList * xnu_dbg_maps(RzDebug *dbg, int only_modules)
static void xnu_collect_thread_state(thread_t port, void *tirp)
RzDebugPid * xnu_get_pid(int pid)
static RzDebugMap * rz_debug_map_clone(RzDebugMap *m)
int xnu_reg_write(RzDebug *dbg, int type, const ut8 *buf, int size)
int xnu_get_vmmap_entries_for_pid(pid_t pid)
#define DYLD_IMAGE_INFO_64_SIZE
#define DYLD_IMAGE_INFO_32_SIZE
static uid_t uidFromPid(pid_t pid)
int xnu_detach(RzDebug *dbg, int pid)
static int xnu_get_kinfo_proc(int pid, struct kinfo_proc *kp)
int xnu_stop(RzDebug *dbg, int pid)
static RzList * xnu_dbg_modules(RzDebug *dbg)
kern_return_t mach_vm_region_recurse(vm_map_t target_task, mach_vm_address_t *address, mach_vm_size_t *size, natural_t *nesting_depth, vm_region_recurse_info_t info, mach_msg_type_number_t *infoCnt)
static void get_mach_header_sizes(size_t *mach_header_sz, size_t *segment_command_sz)
static int xnu_get_thread_status(register thread_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t *count)
static int task_suspend_count(task_t task)
static void xnu_map_free(RzDebugMap *map)
static int xnu_dealloc_threads(RzList *threads)
int xnu_reg_read(RzDebug *dbg, int type, ut8 *buf, int size)
static cpu_type_t xnu_get_cpu_type(pid_t pid)
vm_address_t get_kernel_base(task_t ___task)
#define CAST_DOWN(type, addr)
int xnu_wait(RzDebug *dbg, int pid)
static task_t task_for_pid_ios9pangu(int pid)
int xnu_attach(RzDebug *dbg, int pid)
static int mach0_size(RzDebug *dbg, ut64 addr)
RzList * xnu_thread_list(RzDebug *dbg, int pid, RzList *list)
static cpu_subtype_t xnu_get_cpu_subtype(void)
task_t pid_to_task(int pid)
#define COMMAND_SIZE(segment_count, segment_command_sz, thread_count, tstate_size)
static void xnu_build_corefile_header(vm_offset_t header, int segment_count, int thread_count, int command_size, pid_t pid)
RzDebugInfo * xnu_info(RzDebug *dbg, const char *arg)
#define MAX_TSTATE_FLAVORS
#define LOG_MACH_ERROR(name, rc)
RZ_IPI bool xnu_restore_exception_ports(int pid)
RZ_IPI RzDebugReasonType xnu_wait_for_exception(RzDebug *dbg, int pid, ut32 timeout_ms, bool quiet_signal)
RZ_IPI bool xnu_create_exception_thread(RzDebug *dbg)
RZ_IPI bool rz_xnu_thread_set_gpr(RzDebug *dbg, xnu_thread_t *thread)
RZ_IPI int rz_xnu_update_thread_list(RzDebug *dbg)
RZ_IPI xnu_thread_t * rz_xnu_get_thread(RzDebug *dbg, int tid)
RZ_IPI thread_t rz_xnu_get_cur_thread(RzDebug *dbg)
RZ_IPI bool rz_xnu_thread_set_drx(RzDebug *dbg, xnu_thread_t *thread)
RZ_IPI bool rz_xnu_thread_get_gpr(RzDebug *dbg, xnu_thread_t *thread)
RZ_IPI bool rz_xnu_thread_get_drx(RzDebug *dbg, xnu_thread_t *thread)
static bool xnu_set_trace_bit(RzDebug *dbg, xnu_thread_t *th)
static bool xnu_clear_trace_bit(RzDebug *dbg, xnu_thread_t *th)