Mirror of https://github.com/AFLplusplus/AFLplusplus.git (synced 2025-06-19 13:03:44 +00:00)
182 lines, 6.2 KiB
diff --git a/tcg/tcg.c b/tcg/tcg.c
index e85133ef..54b9b390 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1612,6 +1612,176 @@ bool tcg_op_supported(TCGOpcode op)
     }
 }
 
+
+/* Call the instrumentation function from the TCG IR */
+void tcg_gen_afl_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
+{
+    int i, real_args, nb_rets, pi;
+    unsigned sizemask, flags;
+    TCGOp *op;
+
+    flags = 0;
+    sizemask = 0;
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* We have 64-bit values in one register, but need to pass as two
+       separate parameters. Split them. */
+    int orig_sizemask = sizemask;
+    int orig_nargs = nargs;
+    TCGv_i64 retl, reth;
+    TCGTemp *split_args[MAX_OPC_PARAM];
+
+    retl = NULL;
+    reth = NULL;
+    if (sizemask != 0) {
+        for (i = real_args = 0; i < nargs; ++i) {
+            int is_64bit = sizemask & (1 << (i+1)*2);
+            if (is_64bit) {
+                TCGv_i64 orig = temp_tcgv_i64(args[i]);
+                TCGv_i32 h = tcg_temp_new_i32();
+                TCGv_i32 l = tcg_temp_new_i32();
+                tcg_gen_extr_i64_i32(l, h, orig);
+                split_args[real_args++] = tcgv_i32_temp(h);
+                split_args[real_args++] = tcgv_i32_temp(l);
+            } else {
+                split_args[real_args++] = args[i];
+            }
+        }
+        nargs = real_args;
+        args = split_args;
+        sizemask = 0;
+    }
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+    for (i = 0; i < nargs; ++i) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        int is_signed = sizemask & (2 << (i+1)*2);
+        if (!is_64bit) {
+            TCGv_i64 temp = tcg_temp_new_i64();
+            TCGv_i64 orig = temp_tcgv_i64(args[i]);
+            if (is_signed) {
+                tcg_gen_ext32s_i64(temp, orig);
+            } else {
+                tcg_gen_ext32u_i64(temp, orig);
+            }
+            args[i] = tcgv_i64_temp(temp);
+        }
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+
+    op = tcg_emit_op(INDEX_op_call);
+
+    pi = 0;
+    if (ret != NULL) {
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+        if (orig_sizemask & 1) {
+            /* The 32-bit ABI is going to return the 64-bit value in
+               the %o0/%o1 register pair. Prepare for this by using
+               two return temporaries, and reassemble below. */
+            retl = tcg_temp_new_i64();
+            reth = tcg_temp_new_i64();
+            op->args[pi++] = tcgv_i64_arg(reth);
+            op->args[pi++] = tcgv_i64_arg(retl);
+            nb_rets = 2;
+        } else {
+            op->args[pi++] = temp_arg(ret);
+            nb_rets = 1;
+        }
+#else
+        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
+#ifdef HOST_WORDS_BIGENDIAN
+            op->args[pi++] = temp_arg(ret + 1);
+            op->args[pi++] = temp_arg(ret);
+#else
+            op->args[pi++] = temp_arg(ret);
+            op->args[pi++] = temp_arg(ret + 1);
+#endif
+            nb_rets = 2;
+        } else {
+            op->args[pi++] = temp_arg(ret);
+            nb_rets = 1;
+        }
+#endif
+    } else {
+        nb_rets = 0;
+    }
+    TCGOP_CALLO(op) = nb_rets;
+
+    real_args = 0;
+    for (i = 0; i < nargs; i++) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+            /* some targets want aligned 64 bit args */
+            if (real_args & 1) {
+                op->args[pi++] = TCG_CALL_DUMMY_ARG;
+                real_args++;
+            }
+#endif
+            /* If stack grows up, then we will be placing successive
+               arguments at lower addresses, which means we need to
+               reverse the order compared to how we would normally
+               treat either big or little-endian. For those arguments
+               that will wind up in registers, this still works for
+               HPPA (the only current STACK_GROWSUP target) since the
+               argument registers are *also* allocated in decreasing
+               order. If another such target is added, this logic may
+               have to get more complicated to differentiate between
+               stack arguments and register arguments. */
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+            op->args[pi++] = temp_arg(args[i] + 1);
+            op->args[pi++] = temp_arg(args[i]);
+#else
+            op->args[pi++] = temp_arg(args[i]);
+            op->args[pi++] = temp_arg(args[i] + 1);
+#endif
+            real_args += 2;
+            continue;
+        }
+
+        op->args[pi++] = temp_arg(args[i]);
+        real_args++;
+    }
+    op->args[pi++] = (uintptr_t)func;
+    op->args[pi++] = flags;
+    TCGOP_CALLI(op) = real_args;
+
+    /* Make sure the fields didn't overflow. */
+    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* Free all of the parts we allocated above. */
+    for (i = real_args = 0; i < orig_nargs; ++i) {
+        int is_64bit = orig_sizemask & (1 << (i+1)*2);
+        if (is_64bit) {
+            tcg_temp_free_internal(args[real_args++]);
+            tcg_temp_free_internal(args[real_args++]);
+        } else {
+            real_args++;
+        }
+    }
+    if (orig_sizemask & 1) {
+        /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
+           Note that describing these as TCGv_i64 eliminates an unnecessary
+           zero-extension that tcg_gen_concat_i32_i64 would create. */
+        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
+        tcg_temp_free_i64(retl);
+        tcg_temp_free_i64(reth);
+    }
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+    for (i = 0; i < nargs; ++i) {
+        int is_64bit = sizemask & (1 << (i+1)*2);
+        if (!is_64bit) {
+            tcg_temp_free_internal(args[i]);
+        }
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+}
+
+
 /* Note: we convert the 64 bit args to 32 bit and do some alignment
    and endian swap. Maybe it would be better to do the alignment
    and endian swap in tcg_reg_alloc_call(). */
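For readers of the patch: the body above is essentially QEMU's tcg_gen_callN() with the helper-table lookup replaced by hard-coded flags = 0 and sizemask = 0, since the AFL instrumentation callback is not registered as a TCG helper. The sketch below shows how a translation-time hook might use the new entry point. It is an illustration only: the names afl_maybe_log() and gen_afl_trace_block(), the uint64_t block identifier, and the single-argument layout are assumptions, not part of this diff.

    /* Hypothetical usage sketch (not from this patch). Assumes the QEMU TCG
       headers used by tcg/tcg.c are in scope. */
    extern void afl_maybe_log(uint64_t cur_loc);   /* assumed AFL-side callback */

    static void gen_afl_trace_block(uint64_t cur_loc)
    {
        /* Materialize the block identifier as a TCG constant temporary. */
        TCGv_i64 loc = tcg_const_i64(cur_loc);
        TCGTemp *args[1] = { tcgv_i64_temp(loc) };

        /* ret == NULL: the callback returns nothing, so nb_rets becomes 0. */
        tcg_gen_afl_callN((void *)afl_maybe_log, NULL, 1, args);

        tcg_temp_free_i64(loc);
    }

Passing the callback as a plain void * matches how the patch stores it in op->args via (uintptr_t)func, and because flags is 0 TCG treats the call conservatively (as if it had side effects), so the emitted call is not optimized away.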