Files
AFLplusplus/qemu_mode/patches/cpu-exec.diff
Andrea Fioraldi 5ebb0df444 more stability
2020-07-04 10:44:38 +02:00

60 lines
2.0 KiB
Diff

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 870027d4..a164388a 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -36,6 +36,8 @@
 #include "sysemu/cpus.h"
 #include "sysemu/replay.h"
 
+#include "../patches/afl-qemu-cpu-inl.h"
+
 /* -icount align implementation. */
 
 typedef struct SyncClocks {
@@ -397,11 +399,13 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
+    bool was_translated = false, was_chained = false;
 
     tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
     if (tb == NULL) {
         mmap_lock();
         tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
+        was_translated = true;
         mmap_unlock();
         /* We add the TB in the virtual pc hash table for the fast lookup */
         atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -416,8 +420,30 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
     }
 #endif
     /* See if we can patch the calling TB. */
+    unsigned long edge_id = 0;
     if (last_tb) {
-        tb_add_jump(last_tb, tb_exit, tb);
+
+        if (last_tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID &&
+            ((afl_start_code < tb->pc && afl_end_code > tb->pc) ||
+             (afl_start_code < last_tb->pc && afl_end_code > last_tb->pc))) {
+            mmap_lock();
+            edge_id = ++afl_edges_counter;
+            TranslationBlock *edge = afl_gen_edge(cpu, edge_id);
+            mmap_unlock();
+
+            target_ulong afl_idx = edge_id & (MAP_SIZE -1);
+            INC_AFL_AREA(afl_idx);
+
+            tb_add_jump(last_tb, tb_exit, edge);
+            tb_add_jump(edge, 0, tb);
+        } else {
+            tb_add_jump(last_tb, tb_exit, tb);
+        }
+
+        was_chained = true;
+    }
+    if (was_translated || was_chained) {
+        afl_request_tsl(pc, cs_base, flags, cf_mask, was_chained ? last_tb : NULL, tb_exit, edge_id);
     }
     return tb;
 }