#ifdef CAPSTONE_HAS_X86

#if defined(CAPSTONE_HAS_OSXKERNEL)
#include <Availability.h>

#ifndef CAPSTONE_HAS_OSXKERNEL

#include "../../utils.h"
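/* Lookup tables translating raw decoder values into public x86_reg ids.
   The ENTRY x-macro expands a register list (elided here) into X86_REG_*
   enumerators. */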
static const x86_reg sib_base_map[] = {
#define ENTRY(x) X86_REG_##x,

static const x86_reg sib_index_map[] = {
#define ENTRY(x) X86_REG_##x,

static const x86_reg segment_map[] = {
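/* The x86_map_sib_base()/x86_map_sib_index()/x86_map_segment() accessors
   below are direct table lookups on the raw value r. */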
	return sib_base_map[r];

	return sib_index_map[r];

	return segment_map[r];
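/* reg_name_maps[] and insn_name_maps[] below pair each public id with its
   printable name; the X86_reg_name()/X86_insn_name() lookups that index
   them are compiled only when CAPSTONE_DIET is not defined, since DIET
   builds drop name strings to save space. */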
static const name_map reg_name_maps[] = {

#ifndef CAPSTONE_DIET

	return reg_name_maps[reg].name;
#ifndef CAPSTONE_DIET
static const name_map insn_name_maps[] = {

#ifndef CAPSTONE_DIET

	return insn_name_maps[id].name;
#ifndef CAPSTONE_DIET

#ifndef CAPSTONE_DIET

#define GET_INSTRINFO_ENUM
#ifdef CAPSTONE_X86_REDUCE

#ifndef CAPSTONE_X86_REDUCE

#ifndef CAPSTONE_DIET
	{ 0 }, { 0 }, { 0 }, 0, 0

#ifndef CAPSTONE_DIET
	{ 0 }, { 0 }, { 0 }, 0, 0
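/* A "{ 0 }, { 0 }, { 0 }, 0, 0" tail appears in each insn_map entry only in
   non-DIET builds: the three braced arrays are the implicit registers read,
   implicit registers written and the group list, followed by two branch
   flags. */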
#ifndef CAPSTONE_DIET

	for (i = 0; i < max; i++) {

		insn->id = insns[i].mapid;
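/* Mapping the private (LLVM-derived) opcode to the public id: once the
   matching insn_map entry is located, insns[i].mapid supplies the
   X86_INS_* value published as insn->id (the surrounding lookup logic is
   elided here). */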
#ifndef CAPSTONE_DIET

		insn->detail->regs_write_count = 2;

		insn->detail->regs_write_count = 3;

		insn->detail->regs_read_count = 1;

		insn->detail->regs_write_count = 1;

		insn->detail->regs_read_count = 1;

		insn->detail->regs_write_count = 1;

		insn->detail->regs_read_count = 1;

		insn->detail->regs_write_count = 1;

		insn->detail->regs_read_count = 2;

		insn->detail->regs_write_count = 1;
		insn->detail->regs_read_count = 1;
		insn->detail->groups[insn->detail->groups_count] = X86_GRP_JUMP;
		insn->detail->groups_count++;

		if (insn->detail->x86.operands[0].imm == -78) {

			insn->detail->groups[insn->detail->groups_count] = X86_GRP_INT;
			insn->detail->groups_count++;
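/* Group bookkeeping: jump instructions get X86_GRP_JUMP appended. The imm
   operand is stored sign-extended, so the -78 test matches the interrupt
   byte 0xb2 (-78 == (int8_t)0xb2); that case is additionally tagged
   X86_GRP_INT. */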
static struct insn_reg insn_regs_att[] = {

#ifndef CAPSTONE_X86_REDUCE

static struct insn_reg insn_regs_intel[] = {

#ifndef CAPSTONE_X86_REDUCE

static struct insn_reg2 insn_regs_intel2[] = {
static struct insn_reg insn_regs_intel_sorted[ARR_SIZE(insn_regs_intel)];

	uint16_t l = ((struct insn_reg *)a)->insn;
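/* insn_regs_intel[] is consulted on every Intel-syntax lookup, so a copy in
   insn_regs_intel_sorted[] is sorted once (lazily) to enable binary search;
   regs_cmp() orders entries by their insn field, read via the cast above. */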
	static bool intel_regs_sorted = false;
	unsigned int first = 0;
	unsigned int last = ARR_SIZE(insn_regs_intel) - 1;

	if (!intel_regs_sorted) {
		memcpy(insn_regs_intel_sorted, insn_regs_intel,
				sizeof(insn_regs_intel_sorted));
		qsort(insn_regs_intel_sorted,
				ARR_SIZE(insn_regs_intel_sorted),
				sizeof(struct insn_reg), regs_cmp);
		intel_regs_sorted = true;
	if (insn_regs_intel_sorted[0].insn > id ||
			insn_regs_intel_sorted[last].insn < id) {

	while (first <= last) {
		mid = (first + last) / 2;
		if (insn_regs_intel_sorted[mid].insn < id) {

		} else if (insn_regs_intel_sorted[mid].insn == id) {

			*access = insn_regs_intel_sorted[mid].access;

			return insn_regs_intel_sorted[mid].reg;
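/* X86_insn_reg_intel2() below handles instructions with two implicit
   registers; that table is small, so a plain linear scan suffices. */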
	for (i = 0; i < ARR_SIZE(insn_regs_intel2); i++) {
		if (insn_regs_intel2[i].insn == id) {
			*reg1 = insn_regs_intel2[i].reg1;
			*reg2 = insn_regs_intel2[i].reg2;

			*access1 = insn_regs_intel2[i].access1;

			*access2 = insn_regs_intel2[i].access2;
	for (i = 0; i < ARR_SIZE(insn_regs_intel2); i++) {
		if (insn_regs_intel2[i].insn == id) {

			*reg1 = insn_regs_intel2[i].reg2;
			*reg2 = insn_regs_intel2[i].reg1;

			*access1 = insn_regs_intel2[i].access2;

			*access2 = insn_regs_intel2[i].access1;
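/* The ATT variant (X86_insn_reg_att2) returns the same pair with the two
   registers and their access flags swapped, matching ATT operand order. */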
	if (insn_regs_att[i].insn == id) {

		*access = insn_regs_att[i].access;
		return insn_regs_att[i].reg;
static bool valid_repne(cs_struct *h, unsigned int opcode)

	if (opcode == X86_MOVSW)

	if (opcode == X86_CMPSL)

	if (opcode == X86_SCASL)
#ifndef CAPSTONE_DIET
static bool valid_bnd(cs_struct *h, unsigned int opcode)

static bool xchg_mem(unsigned int opcode)
static bool valid_rep(cs_struct *h, unsigned int opcode)

	if (opcode == X86_MOVSL)

	if (opcode == X86_LODSL)

	if (opcode == X86_STOSL)
static bool valid_repe(cs_struct *h, unsigned int opcode)

	if (opcode == X86_CMPSL)

	if (opcode == X86_SCASL)
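/* The valid_repne()/valid_bnd()/valid_rep()/valid_repe() predicates whitelist
   the opcodes for which an 0xf2/0xf3 byte is a genuine REPNE/BND/REP/REPE
   prefix (string operations such as MOVS/CMPS/SCAS/LODS/STOS in their
   various widths) rather than a mandatory encoding prefix. */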
#ifndef CAPSTONE_DIET

static void add_cx(MCInst *MI)

		MI->flat_insn->detail->regs_read_count++;

		MI->flat_insn->detail->regs_write_count++;
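/* add_cx(): REP-prefixed instructions implicitly use the count register, so
   it is appended to both the implicit read and implicit write lists of the
   current instruction, bumping the counts above. */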
	unsigned int opcode;

#ifndef CAPSTONE_DIET

#ifndef CAPSTONE_DIET
	} else if (valid_repne(MI->csh, opcode)) {

	} else if (valid_bnd(MI->csh, opcode)) {

#ifndef CAPSTONE_X86_REDUCE
		if (opcode == X86_MULPDrr) {

		if (!valid_repne(MI->csh, opcode)) {

#ifndef CAPSTONE_X86_REDUCE

			if (opcode == X86_MULPDrr) {
#ifndef CAPSTONE_DIET

	} else if (valid_rep(MI->csh, opcode)) {

	} else if (valid_repe(MI->csh, opcode)) {

#ifndef CAPSTONE_X86_REDUCE
		if (opcode == X86_MULPDrr) {

		if (!valid_rep(MI->csh, opcode) && !valid_repe(MI->csh, opcode)) {

#ifndef CAPSTONE_X86_REDUCE

			if (opcode == X86_MULPDrr) {
	if (MI->flat_insn->detail->x86.op_count > 0)

		MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count - 1].avx_zero_opmask = true;

		MI->flat_insn->detail->x86.avx_sae = true;

		MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count - 1].avx_bcast = v;
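/* AVX attribute helpers: zeroing opmask and broadcast are recorded on the
   most recently added operand (op_count - 1), while SAE is a property of
   the whole instruction. */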
#ifndef CAPSTONE_DIET

typedef struct insn_op {

static insn_op insn_ops[] = {

#ifdef CAPSTONE_X86_REDUCE
		*eflags = insn_ops[i].flags;
		return insn_ops[i].access;
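/* insn_ops[] stores, per instruction, the EFLAGS bits updated and a
   per-operand access pattern; X86_get_op_access() returns the access array
   and reports the flag mask through *eflags. */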
void X86_reg_access(const cs_insn *insn,
		cs_regs regs_read, uint8_t *regs_read_count,
		cs_regs regs_write, uint8_t *regs_write_count)

	uint8_t read_count, write_count;

	read_count = insn->detail->regs_read_count;
	write_count = insn->detail->regs_write_count;

	memcpy(regs_read, insn->detail->regs_read, read_count * sizeof(insn->detail->regs_read[0]));
	memcpy(regs_write, insn->detail->regs_write, write_count * sizeof(insn->detail->regs_write[0]));
	for (i = 0; i < x86->op_count; i++) {

		switch ((int)op->type) {

				regs_read[read_count] = op->reg;

				regs_write[write_count] = op->reg;

				regs_read[read_count] = op->mem.segment;

				regs_read[read_count] = op->mem.base;

				regs_read[read_count] = op->mem.index;
	*regs_read_count = read_count;
	*regs_write_count = write_count;
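/* X86_reg_access() first copies the implicit register lists from the detail
   block (the memcpy calls above), then walks the explicit operands: a REG
   operand is filed under read and/or write according to its access type,
   while a MEM operand's segment, base and index registers always count as
   reads. Finally the accumulated counts are stored back for the caller. */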
static struct size_id {

} x86_imm_size[] = {

		if (id == x86_imm_size[i].id) {
			return x86_imm_size[i].size;
	unsigned int left, right, m;

	right = ARR_SIZE(x86_imm_size) - 1;

	while (left <= right) {
		m = (left + right) / 2;
		if (id == x86_imm_size[m].id) {
			if (enc_size != NULL)
				*enc_size = x86_imm_size[m].enc_size;

			return x86_imm_size[m].size;

		if (id < x86_imm_size[m].id)
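/*
 * Usage sketch (illustrative, NOT part of the original file): these mapping
 * helpers sit behind the public Capstone API, so a caller never invokes them
 * directly. A minimal sketch, assuming a standard Capstone install; the
 * guard macro X86_MAPPING_USAGE_EXAMPLE is hypothetical and never defined.
 */
#ifdef X86_MAPPING_USAGE_EXAMPLE	/* hypothetical guard, example only */
#include <stdio.h>
#include <capstone/capstone.h>

int main(void)
{
	csh handle;
	cs_insn *insn;
	const uint8_t code[] = { 0x55, 0x48, 0x89, 0xe5 };	/* push rbp; mov rbp, rsp */
	size_t count, i;

	if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK)
		return 1;
	cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);	/* fills the detail block populated above */

	count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
	for (i = 0; i < count; i++)
		/* insn[i].id is set via X86_get_insn_id(); cs_reg_name() resolves
		   register ids through X86_reg_name() and reg_name_maps[]. */
		printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);

	cs_free(insn, count);
	cs_close(&handle);
	return 0;
}
#endif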