Make the QEMU patches simpler by using helpers

commit 4bca8af499 (parent bd1acfd868)
Author: Andrea Fioraldi
Date:   2020-02-16 13:19:20 +01:00

6 changed files with 68 additions and 579 deletions
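The gist of the change: the hand-rolled TCG call emitters (tcg_gen_afl_call0, tcg_gen_afl_compcov_log_call, tcg_gen_afl_maybe_log_call) are dropped in favour of QEMU's own helper machinery. As a rough sketch of that pattern, using only names the diff below actually adds (declaration, runtime definition, translation-time call; bodies elided):

/* 1. Declare the helper once; QEMU's helper-gen/helper-proto headers expand this. */
DEF_HELPER_FLAGS_1(afl_maybe_log, TCG_CALL_NO_RWG, void, tl)

/* 2. Define the runtime implementation with the HELPER() name macro. */
void HELPER(afl_maybe_log)(target_ulong cur_loc) {
  /* ... update the coverage bitmap ... */
}

/* 3. Emit a call to it while translating a block, via the generated stub. */
TCGv cur_loc_v = tcg_const_tl(cur_loc);
gen_helper_afl_maybe_log(cur_loc_v);
tcg_temp_free(cur_loc_v);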


@@ -91,15 +91,11 @@ extern __thread u32 __afl_cmp_counter;
void afl_setup(void);
void afl_forkserver(CPUState *cpu);
void afl_debug_dump_saved_regs();
// void afl_debug_dump_saved_regs(void);
void afl_persistent_loop();
void afl_persistent_loop(void);
void tcg_gen_afl_call0(void *func);
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv arg1,
TCGv arg2);
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
void afl_gen_tcg_plain_call(void *func);
void afl_float_compcov_log_32(target_ulong cur_loc, float32 arg1, float32 arg2,
void *status);
@@ -112,12 +108,10 @@ void afl_float_compcov_log_80(target_ulong cur_loc, floatx80 arg1,
static inline int is_valid_addr(target_ulong addr) {
int l, flags;
int flags;
target_ulong page;
void * p;
page = addr & TARGET_PAGE_MASK;
l = (page + TARGET_PAGE_SIZE) - addr;
flags = page_get_flags(page);
if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0;


@@ -409,7 +409,7 @@ void afl_forkserver(CPUState *cpu) {
/* A simplified persistent mode handler, used as explained in README.llvm. */
void afl_persistent_loop() {
void afl_persistent_loop(void) {
static u32 cycle_cnt;
static struct afl_tsl exit_cmd_tsl = {{-1, 0, 0, 0}, NULL};


@@ -35,7 +35,14 @@
#include "tcg.h"
#include "tcg-op.h"
static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
#if TCG_TARGET_REG_BITS == 64
#define _DEFAULT_MO MO_64
#else
#define _DEFAULT_MO MO_32
#endif
void HELPER(afl_compcov_16)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t idx = cur_loc;
@@ -44,7 +51,7 @@ static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
}
static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
void HELPER(afl_compcov_32)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t idx = cur_loc;
@@ -63,7 +70,7 @@ static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
}
static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
void HELPER(afl_compcov_64)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t idx = cur_loc;
@@ -102,7 +109,7 @@ static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
}
static void afl_cmplog_16(target_ulong cur_loc, target_ulong arg1,
void HELPER(afl_cmplog_16)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t k = (uintptr_t)cur_loc;
@@ -121,7 +128,7 @@ static void afl_cmplog_16(target_ulong cur_loc, target_ulong arg1,
}
static void afl_cmplog_32(target_ulong cur_loc, target_ulong arg1,
void HELPER(afl_cmplog_32)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t k = (uintptr_t)cur_loc;
@@ -137,7 +144,7 @@ static void afl_cmplog_32(target_ulong cur_loc, target_ulong arg1,
}
static void afl_cmplog_64(target_ulong cur_loc, target_ulong arg1,
void HELPER(afl_cmplog_64)(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
register uintptr_t k = (uintptr_t)cur_loc;
@@ -153,28 +160,28 @@ static void afl_cmplog_64(target_ulong cur_loc, target_ulong arg1,
}
static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
static void afl_gen_compcov(target_ulong cur_loc, TCGv arg1, TCGv arg2,
TCGMemOp ot, int is_imm) {
void *func;
if (cur_loc > afl_end_code || cur_loc < afl_start_code) return;
if (__afl_cmp_map) {
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= CMP_MAP_W - 1;
TCGv cur_loc_v = tcg_const_tl(cur_loc);
switch (ot) {
case MO_64: func = &afl_cmplog_64; break;
case MO_32: func = &afl_cmplog_32; break;
case MO_16: func = &afl_cmplog_16; break;
default: return;
case MO_64: gen_helper_afl_cmplog_64(cur_loc_v, arg1, arg2); break;
case MO_32: gen_helper_afl_cmplog_32(cur_loc_v, arg1, arg2); break;
case MO_16: gen_helper_afl_cmplog_16(cur_loc_v, arg1, arg2); break;
default: break;
}
tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
tcg_temp_free(cur_loc_v);
} else if (afl_compcov_level) {
@@ -182,19 +189,21 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 7;
TCGv cur_loc_v = tcg_const_tl(cur_loc);
if (cur_loc >= afl_inst_rms) return;
switch (ot) {
case MO_64: func = &afl_compcov_log_64; break;
case MO_32: func = &afl_compcov_log_32; break;
case MO_16: func = &afl_compcov_log_16; break;
default: return;
case MO_64: gen_helper_afl_compcov_64(cur_loc_v, arg1, arg2); break;
case MO_32: gen_helper_afl_compcov_32(cur_loc_v, arg1, arg2); break;
case MO_16: gen_helper_afl_compcov_16(cur_loc_v, arg1, arg2); break;
default: break;
}
tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
tcg_temp_free(cur_loc_v);
}
@@ -276,20 +285,22 @@ static void gpr_saving(TCGv *cpu_regs, int regs_num) {
gpr_sv = tcg_const_ptr(&persistent_saved_gpr[i]);
tcg_gen_st_tl(cpu_regs[i], gpr_sv, 0);
tcg_temp_free_ptr(gpr_sv);
}
gen_set_label(lbl_restore_gpr);
tcg_gen_afl_call0(&afl_persistent_loop);
afl_gen_tcg_plain_call(&afl_persistent_loop);
if (afl_persistent_hook_ptr) tcg_gen_afl_call0(callback_to_persistent_hook);
if (afl_persistent_hook_ptr) afl_gen_tcg_plain_call(callback_to_persistent_hook);
// restore GPR registers
for (i = 0; i < regs_num; ++i) {
gpr_sv = tcg_const_ptr(&persistent_saved_gpr[i]);
tcg_gen_ld_tl(cpu_regs[i], gpr_sv, 0);
tcg_temp_free_ptr(gpr_sv);
}
@@ -323,19 +334,19 @@ static void restore_state_for_persistent(TCGv *cpu_regs, int regs_num, int sp) {
if (s->pc == afl_persistent_addr) { \
\
restore_state_for_persistent(cpu_regs, AFL_REGS_NUM, R_ESP); \
/*tcg_gen_afl_call0(log_x86_saved_gpr); \
tcg_gen_afl_call0(log_x86_sp_content);*/ \
/*afl_gen_tcg_plain_call(log_x86_saved_gpr); \
afl_gen_tcg_plain_call(log_x86_sp_content);*/ \
\
if (afl_persistent_ret_addr == 0) { \
\
TCGv_ptr paddr = tcg_const_ptr(afl_persistent_addr); \
tcg_gen_st_tl(paddr, cpu_regs[R_ESP], persisent_retaddr_offset); \
tcg_temp_free_ptr(paddr); \
TCGv paddr = tcg_const_tl(afl_persistent_addr); \
tcg_gen_qemu_st_tl(paddr, cpu_regs[R_ESP], persisent_retaddr_offset, _DEFAULT_MO); \
tcg_temp_free(paddr); \
\
} \
\
if (!persistent_save_gpr) tcg_gen_afl_call0(&afl_persistent_loop); \
/*tcg_gen_afl_call0(log_x86_sp_content);*/ \
if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
/*afl_gen_tcg_plain_call(log_x86_sp_content);*/ \
\
} else if (afl_persistent_ret_addr && s->pc == afl_persistent_ret_addr) { \
\
@@ -357,13 +368,11 @@ static void restore_state_for_persistent(TCGv *cpu_regs, int regs_num, int sp) {
\
if (afl_persistent_ret_addr == 0) { \
\
TCGv_ptr paddr = tcg_const_ptr(afl_persistent_addr); \
tcg_gen_mov_i32(cpu_R[14], paddr); \
tcg_temp_free_ptr(paddr); \
tcg_gen_movi_tl(cpu_R[14], afl_persistent_addr); \
\
} \
\
if (!persistent_save_gpr) tcg_gen_afl_call0(&afl_persistent_loop); \
if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
\
} else if (afl_persistent_ret_addr && dc->pc == afl_persistent_ret_addr) { \
\
@@ -384,13 +393,11 @@ static void restore_state_for_persistent(TCGv *cpu_regs, int regs_num, int sp) {
\
if (afl_persistent_ret_addr == 0) { \
\
TCGv_ptr paddr = tcg_const_ptr(afl_persistent_addr); \
tcg_gen_mov_i32(cpu_X[30], paddr); \
tcg_temp_free_ptr(paddr); \
tcg_gen_movi_tl(cpu_X[30], afl_persistent_addr); \
\
} \
\
if (!persistent_save_gpr) tcg_gen_afl_call0(&afl_persistent_loop); \
if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
\
} else if (afl_persistent_ret_addr && s->pc == afl_persistent_ret_addr) { \
\


@@ -31,535 +31,14 @@
*/
void afl_maybe_log(void *cur_loc);
void afl_gen_tcg_plain_call(void *func) {
/* Note: we convert the 64 bit args to 32 bit and do some alignment
and endian swap. Maybe it would be better to do the alignment
and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
int real_args, pi;
unsigned sizemask, flags;
TCGOp * op;
#if TARGET_LONG_BITS == 64
TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc));
sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
#else
TCGTemp *arg = tcgv_i32_temp(tcg_const_tl(cur_loc));
sizemask = dh_sizemask(void, 0) | dh_sizemask(i32, 1);
#endif
flags = 0;
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* We have 64-bit values in one register, but need to pass as two
separate parameters. Split them. */
int orig_sizemask = sizemask;
TCGv_i64 retl, reth;
TCGTemp *split_args[MAX_OPC_PARAM];
retl = NULL;
reth = NULL;
if (sizemask != 0) {
real_args = 0;
int is_64bit = sizemask & (1 << 2);
if (is_64bit) {
TCGv_i64 orig = temp_tcgv_i64(arg);
TCGv_i32 h = tcg_temp_new_i32();
TCGv_i32 l = tcg_temp_new_i32();
tcg_gen_extr_i64_i32(l, h, orig);
split_args[real_args++] = tcgv_i32_temp(h);
split_args[real_args++] = tcgv_i32_temp(l);
} else {
split_args[real_args++] = arg;
}
nargs = real_args;
args = split_args;
sizemask = 0;
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
int is_64bit = sizemask & (1 << 2);
int is_signed = sizemask & (2 << 2);
if (!is_64bit) {
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 orig = temp_tcgv_i64(arg);
if (is_signed) {
tcg_gen_ext32s_i64(temp, orig);
} else {
tcg_gen_ext32u_i64(temp, orig);
}
arg = tcgv_i64_temp(temp);
}
#endif /* TCG_TARGET_EXTEND_ARGS */
op = tcg_emit_op(INDEX_op_call);
pi = 0;
TCGOp * op = tcg_emit_op(INDEX_op_call);
TCGOP_CALLO(op) = 0;
real_args = 0;
int is_64bit = sizemask & (1 << 2);
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
op->args[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
/* If stack grows up, then we will be placing successive
arguments at lower addresses, which means we need to
reverse the order compared to how we would normally
treat either big or little-endian. For those arguments
that will wind up in registers, this still works for
HPPA (the only current STACK_GROWSUP target) since the
argument registers are *also* allocated in decreasing
order. If another such target is added, this logic may
have to get more complicated to differentiate between
stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
op->args[pi++] = temp_arg(arg + 1);
op->args[pi++] = temp_arg(arg);
#else
op->args[pi++] = temp_arg(arg);
op->args[pi++] = temp_arg(arg + 1);
#endif
real_args += 2;
}
op->args[pi++] = temp_arg(arg);
real_args++;
op->args[pi++] = (uintptr_t)&afl_maybe_log;
op->args[pi++] = flags;
TCGOP_CALLI(op) = real_args;
/* Make sure the fields didn't overflow. */
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* Free all of the parts we allocated above. */
real_args = 0;
int is_64bit = orig_sizemask & (1 << 2);
if (is_64bit) {
tcg_temp_free_internal(args[real_args++]);
tcg_temp_free_internal(args[real_args++]);
} else {
real_args++;
}
if (orig_sizemask & 1) {
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
Note that describing these as TCGv_i64 eliminates an unnecessary
zero-extension that tcg_gen_concat_i32_i64 would create. */
tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
tcg_temp_free_i64(retl);
tcg_temp_free_i64(reth);
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
int is_64bit = sizemask & (1 << 2);
if (!is_64bit) { tcg_temp_free_internal(arg); }
#endif /* TCG_TARGET_EXTEND_ARGS */
op->args[0] = (uintptr_t)func;
op->args[1] = 0;
TCGOP_CALLI(op) = 0;
}
/* Note: we convert the 64 bit args to 32 bit and do some alignment
and endian swap. Maybe it would be better to do the alignment
and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_afl_call0(void *func) {
int i, real_args, nb_rets, pi;
unsigned sizemask, flags;
TCGOp * op;
const int nargs = 0;
TCGTemp **args;
flags = 0;
sizemask = dh_sizemask(void, 0);
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* We have 64-bit values in one register, but need to pass as two
separate parameters. Split them. */
int orig_sizemask = sizemask;
int orig_nargs = nargs;
TCGv_i64 retl, reth;
TCGTemp *split_args[MAX_OPC_PARAM];
retl = NULL;
reth = NULL;
if (sizemask != 0) {
for (i = real_args = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
TCGv_i64 orig = temp_tcgv_i64(args[i]);
TCGv_i32 h = tcg_temp_new_i32();
TCGv_i32 l = tcg_temp_new_i32();
tcg_gen_extr_i64_i32(l, h, orig);
split_args[real_args++] = tcgv_i32_temp(h);
split_args[real_args++] = tcgv_i32_temp(l);
} else {
split_args[real_args++] = args[i];
}
}
nargs = real_args;
args = split_args;
sizemask = 0;
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
int is_signed = sizemask & (2 << (i + 1) * 2);
if (!is_64bit) {
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 orig = temp_tcgv_i64(args[i]);
if (is_signed) {
tcg_gen_ext32s_i64(temp, orig);
} else {
tcg_gen_ext32u_i64(temp, orig);
}
args[i] = tcgv_i64_temp(temp);
}
}
#endif /* TCG_TARGET_EXTEND_ARGS */
op = tcg_emit_op(INDEX_op_call);
pi = 0;
nb_rets = 0;
TCGOP_CALLO(op) = nb_rets;
real_args = 0;
for (i = 0; i < nargs; i++) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
op->args[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
/* If stack grows up, then we will be placing successive
arguments at lower addresses, which means we need to
reverse the order compared to how we would normally
treat either big or little-endian. For those arguments
that will wind up in registers, this still works for
HPPA (the only current STACK_GROWSUP target) since the
argument registers are *also* allocated in decreasing
order. If another such target is added, this logic may
have to get more complicated to differentiate between
stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
op->args[pi++] = temp_arg(args[i] + 1);
op->args[pi++] = temp_arg(args[i]);
#else
op->args[pi++] = temp_arg(args[i]);
op->args[pi++] = temp_arg(args[i] + 1);
#endif
real_args += 2;
continue;
}
op->args[pi++] = temp_arg(args[i]);
real_args++;
}
op->args[pi++] = (uintptr_t)func;
op->args[pi++] = flags;
TCGOP_CALLI(op) = real_args;
/* Make sure the fields didn't overflow. */
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* Free all of the parts we allocated above. */
for (i = real_args = 0; i < orig_nargs; ++i) {
int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
tcg_temp_free_internal(args[real_args++]);
tcg_temp_free_internal(args[real_args++]);
} else {
real_args++;
}
}
if (orig_sizemask & 1) {
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
Note that describing these as TCGv_i64 eliminates an unnecessary
zero-extension that tcg_gen_concat_i32_i64 would create. */
tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
tcg_temp_free_i64(retl);
tcg_temp_free_i64(reth);
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (!is_64bit) { tcg_temp_free_internal(args[i]); }
}
#endif /* TCG_TARGET_EXTEND_ARGS */
}
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv arg1,
TCGv arg2) {
int i, real_args, nb_rets, pi;
unsigned sizemask, flags;
TCGOp * op;
const int nargs = 3;
#if TARGET_LONG_BITS == 64
TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1),
tcgv_i64_temp(arg2)};
sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
dh_sizemask(i64, 3);
#else
TCGTemp *args[3] = {tcgv_i32_temp(tcg_const_tl(cur_loc)), tcgv_i32_temp(arg1),
tcgv_i32_temp(arg2)};
sizemask = dh_sizemask(void, 0) | dh_sizemask(i32, 1) | dh_sizemask(i32, 2) |
dh_sizemask(i32, 3);
#endif
flags = 0;
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* We have 64-bit values in one register, but need to pass as two
separate parameters. Split them. */
int orig_sizemask = sizemask;
int orig_nargs = nargs;
TCGv_i64 retl, reth;
TCGTemp *split_args[MAX_OPC_PARAM];
retl = NULL;
reth = NULL;
if (sizemask != 0) {
for (i = real_args = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
TCGv_i64 orig = temp_tcgv_i64(args[i]);
TCGv_i32 h = tcg_temp_new_i32();
TCGv_i32 l = tcg_temp_new_i32();
tcg_gen_extr_i64_i32(l, h, orig);
split_args[real_args++] = tcgv_i32_temp(h);
split_args[real_args++] = tcgv_i32_temp(l);
} else {
split_args[real_args++] = args[i];
}
}
nargs = real_args;
args = split_args;
sizemask = 0;
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
int is_signed = sizemask & (2 << (i + 1) * 2);
if (!is_64bit) {
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 orig = temp_tcgv_i64(args[i]);
if (is_signed) {
tcg_gen_ext32s_i64(temp, orig);
} else {
tcg_gen_ext32u_i64(temp, orig);
}
args[i] = tcgv_i64_temp(temp);
}
}
#endif /* TCG_TARGET_EXTEND_ARGS */
op = tcg_emit_op(INDEX_op_call);
pi = 0;
nb_rets = 0;
TCGOP_CALLO(op) = nb_rets;
real_args = 0;
for (i = 0; i < nargs; i++) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
op->args[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
/* If stack grows up, then we will be placing successive
arguments at lower addresses, which means we need to
reverse the order compared to how we would normally
treat either big or little-endian. For those arguments
that will wind up in registers, this still works for
HPPA (the only current STACK_GROWSUP target) since the
argument registers are *also* allocated in decreasing
order. If another such target is added, this logic may
have to get more complicated to differentiate between
stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
op->args[pi++] = temp_arg(args[i] + 1);
op->args[pi++] = temp_arg(args[i]);
#else
op->args[pi++] = temp_arg(args[i]);
op->args[pi++] = temp_arg(args[i] + 1);
#endif
real_args += 2;
continue;
}
op->args[pi++] = temp_arg(args[i]);
real_args++;
}
op->args[pi++] = (uintptr_t)func;
op->args[pi++] = flags;
TCGOP_CALLI(op) = real_args;
/* Make sure the fields didn't overflow. */
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* Free all of the parts we allocated above. */
for (i = real_args = 0; i < orig_nargs; ++i) {
int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
tcg_temp_free_internal(args[real_args++]);
tcg_temp_free_internal(args[real_args++]);
} else {
real_args++;
}
}
if (orig_sizemask & 1) {
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
Note that describing these as TCGv_i64 eliminates an unnecessary
zero-extension that tcg_gen_concat_i32_i64 would create. */
tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
tcg_temp_free_i64(retl);
tcg_temp_free_i64(reth);
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (!is_64bit) { tcg_temp_free_internal(args[i]); }
}
#endif /* TCG_TARGET_EXTEND_ARGS */
}
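Pieced together from the added lines in the hunk above, the zero-argument replacement that survives this mass deletion appears to reduce to roughly the following (a sketch reconstructed from the diff, not copied from the tree):

void afl_gen_tcg_plain_call(void *func) {

  TCGOp *op = tcg_emit_op(INDEX_op_call);

  TCGOP_CALLO(op) = 0;              /* no output (return) temps */

  op->args[0] = (uintptr_t)func;    /* callee address */
  op->args[1] = 0;                  /* call flags */
  TCGOP_CALLI(op) = 0;              /* no input temps */

}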


@@ -34,7 +34,7 @@
#include "afl-qemu-common.h"
#include "tcg-op.h"
void afl_maybe_log(target_ulong cur_loc) {
void HELPER(afl_maybe_log)(target_ulong cur_loc) {
register uintptr_t afl_idx = cur_loc ^ afl_prev_loc;
@@ -67,7 +67,9 @@ static void afl_gen_trace(target_ulong cur_loc) {
if (cur_loc >= afl_inst_rms) return;
tcg_gen_afl_maybe_log_call(cur_loc);
TCGv cur_loc_v = tcg_const_tl(cur_loc);
gen_helper_afl_maybe_log(cur_loc_v);
tcg_temp_free(cur_loc_v);
}


@@ -1,10 +1,17 @@
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 1bd39d13..944997ee 100644
index 1bd39d13..c58dee31 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -260,3 +260,5 @@ DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -260,3 +260,12 @@ DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_1(afl_entry_routine, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(afl_maybe_log, TCG_CALL_NO_RWG, void, tl)
+DEF_HELPER_FLAGS_3(afl_compcov_16, TCG_CALL_NO_RWG, void, tl, tl, tl)
+DEF_HELPER_FLAGS_3(afl_compcov_32, TCG_CALL_NO_RWG, void, tl, tl, tl)
+DEF_HELPER_FLAGS_3(afl_compcov_64, TCG_CALL_NO_RWG, void, tl, tl, tl)
+DEF_HELPER_FLAGS_3(afl_cmplog_16, TCG_CALL_NO_RWG, void, tl, tl, tl)
+DEF_HELPER_FLAGS_3(afl_cmplog_32, TCG_CALL_NO_RWG, void, tl, tl, tl)
+DEF_HELPER_FLAGS_3(afl_cmplog_64, TCG_CALL_NO_RWG, void, tl, tl, tl)
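As far as QEMU's helper machinery goes, the tl slots in these declarations resolve to target_ulong on the C side and TCGv at generation time, so each DEF_HELPER_FLAGS_3 line pairs up with a definition and a generated emitter roughly as below (a sketch mirroring the cmplog hunk earlier in this commit; the body is elided):

DEF_HELPER_FLAGS_3(afl_cmplog_32, TCG_CALL_NO_RWG, void, tl, tl, tl)

void HELPER(afl_cmplog_32)(target_ulong cur_loc, target_ulong arg1,
                           target_ulong arg2) {
  /* ... record both 32-bit comparison operands for CmpLog ... */
}

/* at translation time: */
TCGv cur_loc_v = tcg_const_tl(cur_loc);
gen_helper_afl_cmplog_32(cur_loc_v, arg1, arg2);
tcg_temp_free(cur_loc_v);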