Compare commits

...

5 Commits

SHA1 Message Date
5ebb0df444 more stability 2020-07-04 10:44:38 +02:00
0eca7a4f73 small fix 2020-07-03 23:51:33 +02:00
1bee082739 notify afl-fuzz about variadic map 2020-07-03 23:41:02 +02:00
bac6c7891d variadic map size 2020-07-03 23:34:18 +02:00
8b065725cc edges instrumentation at branch link time 2020-07-03 23:07:35 +02:00
8 changed files with 168 additions and 48 deletions

View File

@@ -80,10 +80,13 @@ typedef struct afl_forkserver {
u8 qemu_mode; /* if running in qemu mode or not */
u8 variadic_map_size; /* update map_size after each exec */
u32 *shmem_fuzz_len; /* length of the fuzzing test case */
u8 *shmem_fuzz; /* allocated memory for fuzzing */
char *cmplog_binary; /* the name of the cmplog binary */
/* Function to kick off the forkserver child */

View File

@@ -48,6 +48,7 @@ typedef uint32_t u32;
#define FS_OPT_SNAPSHOT 0x20000000
#define FS_OPT_AUTODICT 0x10000000
#define FS_OPT_SHDMEM_FUZZ 0x01000000
#define FS_OPT_VARIADIC_MAP 0x02000000
#define FS_OPT_OLD_AFLPP_WORKAROUND 0x0f000000
// FS_OPT_MAX_MAPSIZE is 8388608 = 0x800000 = 2^23 = 1 << 23
#define FS_OPT_MAX_MAPSIZE ((0x00fffffe >> 1) + 1)
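The new FS_OPT_VARIADIC_MAP flag travels in the same 32-bit status word that the forkserver sends to afl-fuzz during the handshake. A minimal sketch of how that word is composed and decoded; the FS_OPT_ENABLED/FS_OPT_MAPSIZE values and the SET/GET map-size helpers are reproduced from memory of the same header, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

#define FS_OPT_ENABLED        0x80000001
#define FS_OPT_MAPSIZE        0x40000000
#define FS_OPT_VARIADIC_MAP   0x02000000
#define FS_OPT_MAX_MAPSIZE    ((0x00fffffe >> 1) + 1)  /* 8388608 */
#define FS_OPT_SET_MAPSIZE(x) \
  ((x) <= 1 || (x) > FS_OPT_MAX_MAPSIZE ? 0 : (((x) - 1) << 1))
#define FS_OPT_GET_MAPSIZE(x) ((((x) & 0x00fffffe) >> 1) + 1)

int main(void) {
  uint32_t map_size = 65536;
  /* target side: advertise the map size plus the new capability bit */
  uint32_t status = FS_OPT_ENABLED | FS_OPT_MAPSIZE |
                    FS_OPT_SET_MAPSIZE(map_size) | FS_OPT_VARIADIC_MAP;
  /* fuzzer side: recover the size (stored in bits 1..23) and test the bit */
  printf("map size %u, variadic %d\n", FS_OPT_GET_MAPSIZE(status),
         !!(status & FS_OPT_VARIADIC_MAP));
  return 0;
}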

View File

@@ -91,12 +91,16 @@ extern afl_persistent_hook_fn afl_persistent_hook_ptr;
extern __thread abi_ulong afl_prev_loc;
extern unsigned long afl_edges_counter;
extern struct cmp_map *__afl_cmp_map;
extern __thread u32 __afl_cmp_counter;
void afl_setup(void);
void afl_forkserver(CPUState *cpu);
TranslationBlock *afl_gen_edge(CPUState *cpu, unsigned long edge_id);
// void afl_debug_dump_saved_regs(void);
void afl_persistent_loop(void);

View File

@@ -31,8 +31,10 @@
*/
#include <sys/shm.h>
#include "afl-qemu-common.h"
#include "tcg-op.h"
#include <sys/shm.h>
#ifndef AFL_QEMU_STATIC_BUILD
#include <dlfcn.h>
@@ -89,6 +91,8 @@ u8 sharedmem_fuzzing;
afl_persistent_hook_fn afl_persistent_hook_ptr;
unsigned long afl_edges_counter = sizeof(unsigned);
/* Instrumentation ratio: */
unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */
@@ -97,7 +101,7 @@ unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */
static void afl_wait_tsl(CPUState *, int);
static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t,
TranslationBlock *, int);
TranslationBlock *, int, unsigned long);
/* Data structures passed around by the translate handlers: */
@@ -122,6 +126,7 @@ struct afl_chain {
struct afl_tb last_tb;
uint32_t cf_mask;
int tb_exit;
unsigned long edge_id;
};
@@ -343,6 +348,7 @@ void afl_forkserver(CPUState *cpu) {
// with the max ID value
if (MAP_SIZE <= FS_OPT_MAX_MAPSIZE)
status |= (FS_OPT_SET_MAPSIZE(MAP_SIZE) | FS_OPT_MAPSIZE);
status |= FS_OPT_VARIADIC_MAP;
if (sharedmem_fuzzing != 0) status |= FS_OPT_SHDMEM_FUZZ;
if (status) status |= (FS_OPT_ENABLED);
if (getenv("AFL_DEBUG"))
@@ -352,6 +358,8 @@ void afl_forkserver(CPUState *cpu) {
/* Tell the parent that we're alive. If the parent doesn't want
to talk, assume that we're not running in forkserver mode. */
*(unsigned*)afl_area_ptr = ((afl_edges_counter + 7) & (-8));
if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;
afl_forksrv_pid = getpid();
@@ -458,6 +466,8 @@
}
first_run = 0;
*(unsigned*)afl_area_ptr = ((afl_edges_counter + 8) & (-8));
if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7);
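The two *(unsigned*)afl_area_ptr writes above turn the first sizeof(unsigned) bytes of the coverage map into an in-band field: before the 4-byte hello, and again after every run, the QEMU side stores the current edge counter rounded up to an 8-byte boundary so afl-fuzz knows how much of the map is actually in use; this is also why afl_edges_counter starts at sizeof(unsigned), keeping real edge bytes clear of the field. A standalone illustration of the rounding as written in the handshake path (the per-run hunk rounds with + 8 instead of + 7), values purely illustrative:

#include <stdio.h>

/* round x up to a multiple of 8, as in (x + 7) & (-8) above */
static unsigned long round_up8(unsigned long x) {
  return (x + 7) & (unsigned long)-8;
}

int main(void) {
  unsigned long afl_edges_counter = sizeof(unsigned) + 13; /* 4 reserved bytes + 13 edges */
  printf("reported map size: %lu\n", round_up8(afl_edges_counter)); /* 24 */
  return 0;
}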
@@ -534,7 +544,7 @@ void afl_persistent_loop(void) {
static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
uint32_t cf_mask, TranslationBlock *last_tb,
int tb_exit) {
int tb_exit, unsigned long edge_id) {
if (disable_caching) return;
@@ -559,6 +569,7 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
c.last_tb.flags = last_tb->flags;
c.cf_mask = cf_mask;
c.tb_exit = tb_exit;
c.edge_id = edge_id;
if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
return;
@@ -620,7 +631,22 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
c.last_tb.flags, c.cf_mask);
if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); }
if (last_tb) {
if (c.edge_id && ((afl_start_code < tb->pc && afl_end_code > tb->pc) || (afl_start_code < last_tb->pc && afl_end_code > last_tb->pc))) {
mmap_lock();
afl_edges_counter = MAX(c.edge_id, afl_edges_counter);
TranslationBlock *tb_edge = afl_gen_edge(cpu, c.edge_id);
mmap_unlock();
tb_add_jump(last_tb, c.tb_exit, tb_edge);
tb_add_jump(tb_edge, 0, tb);
} else
tb_add_jump(last_tb, c.tb_exit, tb);
}
}
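In the hunks above, every chaining request the child sends over the TSL pipe now carries its edge_id, and the forkserver parent replays it: it raises its own afl_edges_counter to at least that ID and regenerates the same edge block before wiring the jumps, so the parent's code cache stays consistent with the child it forked. A stripped-down model of that exchange, with a simplified record standing in for the real struct afl_chain:

#include <stdio.h>
#include <unistd.h>

struct chain_msg {            /* simplified stand-in for struct afl_chain */
  unsigned long pc, last_pc;
  int tb_exit;
  unsigned long edge_id;
};

int main(void) {
  int fd[2];
  if (pipe(fd)) return 1;

  /* child side: report a freshly created chain, tagged with its edge ID */
  struct chain_msg out = { 0x4000, 0x3ff0, 0, 5 };
  if (write(fd[1], &out, sizeof out) != (ssize_t)sizeof out) return 1;

  /* parent side: replay it and keep the edge counter in sync (the MAX()) */
  struct chain_msg in;
  unsigned long afl_edges_counter = sizeof(unsigned);
  if (read(fd[0], &in, sizeof in) != (ssize_t)sizeof in) return 1;
  if (in.edge_id > afl_edges_counter) afl_edges_counter = in.edge_id;
  printf("parent edge counter now %lu\n", afl_edges_counter);
  return 0;
}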
@@ -631,4 +657,3 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
close(fd);
}

View File

@@ -34,42 +34,104 @@
#include "afl-qemu-common.h"
#include "tcg-op.h"
void HELPER(afl_maybe_log)(target_ulong cur_loc) {
register uintptr_t afl_idx = cur_loc ^ afl_prev_loc;
void HELPER(afl_maybe_log)(target_ulong afl_idx) {
INC_AFL_AREA(afl_idx);
afl_prev_loc = cur_loc >> 1;
}
/* Generates TCG code for AFL's tracing instrumentation. */
static void afl_gen_trace(target_ulong cur_loc) {
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *afl_gen_edge(CPUState *cpu, unsigned long edge_id)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
/* Optimize for cur_loc > afl_end_code, which is the most likely case on
Linux systems. */
assert_memory_lock();
if (cur_loc > afl_end_code ||
cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of
// static dummy buffer
return;
buffer_overflow:
tb = tb_alloc(0);
if (unlikely(!tb)) {
/* flush must be done */
tb_flush(cpu);
mmap_unlock();
/* Make the execution loop process the flush as soon as possible. */
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
/* Looks like QEMU always maps to fixed locations, so ASLR is not a
concern. Phew. But instruction addresses may be aligned. Let's mangle
the value to get something quasi-uniform. */
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = gen_code_buf;
tb->pc = 0;
tb->cs_base = 0;
tb->flags = 0;
tb->cflags = 0;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tcg_ctx->tb_cflags = 0;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;
tcg_func_start(tcg_ctx);
/* Implement probabilistic instrumentation by looking at scrambled block
address. This keeps the instrumented locations stable across runs. */
tcg_ctx->cpu = ENV_GET_CPU(env);
target_ulong afl_idx = edge_id & (MAP_SIZE -1);
TCGv tmp0 = tcg_const_tl(afl_idx);
gen_helper_afl_maybe_log(tmp0);
tcg_temp_free(tmp0);
tcg_gen_goto_tb(0);
tcg_gen_exit_tb(tb, 0);
if (cur_loc >= afl_inst_rms) return;
tcg_ctx->cpu = NULL;
TCGv cur_loc_v = tcg_const_tl(cur_loc);
gen_helper_afl_maybe_log(cur_loc_v);
tcg_temp_free(cur_loc_v);
trace_translate_block(tb, tb->pc, tb->tc.ptr);
/* generate machine code */
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
if (TCG_TARGET_HAS_direct_jump) {
tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
tcg_ctx->tb_jmp_target_addr = NULL;
} else {
tcg_ctx->tb_jmp_insn_offset = NULL;
tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
}
/* ??? Overflow could be handled better here. In particular, we
don't need to re-do gen_intermediate_code, nor should we re-do
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
gen_code_size = tcg_gen_code(tcg_ctx, tb);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
goto buffer_overflow;
}
tb->tc.size = gen_code_size;
atomic_set(&tcg_ctx->code_gen_ptr, (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN));
/* init jump list */
qemu_spin_init(&tb->jmp_lock);
tb->jmp_list_head = (uintptr_t)NULL;
tb->jmp_list_next[0] = (uintptr_t)NULL;
tb->jmp_list_next[1] = (uintptr_t)NULL;
tb->jmp_dest[0] = (uintptr_t)NULL;
tb->jmp_dest[1] = (uintptr_t)NULL;
/* init original jump addresses which have been set during tcg_gen_code() */
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
tb_reset_jump(tb, 0);
}
if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
tb_reset_jump(tb, 1);
}
return tb;
}

View File

@@ -1,5 +1,5 @@
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 870027d4..0bc87dfc 100644
index 870027d4..a164388a 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -36,6 +36,8 @@
@@ -25,14 +25,35 @@ index 870027d4..0bc87dfc 100644
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -418,6 +422,10 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
@@ -416,8 +420,30 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
}
#endif
/* See if we can patch the calling TB. */
+ unsigned long edge_id = 0;
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
- tb_add_jump(last_tb, tb_exit, tb);
+
+ if (last_tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID &&
+ ((afl_start_code < tb->pc && afl_end_code > tb->pc) ||
+ (afl_start_code < last_tb->pc && afl_end_code > last_tb->pc))) {
+ mmap_lock();
+ edge_id = ++afl_edges_counter;
+ TranslationBlock *edge = afl_gen_edge(cpu, edge_id);
+ mmap_unlock();
+
+ target_ulong afl_idx = edge_id & (MAP_SIZE -1);
+ INC_AFL_AREA(afl_idx);
+
+ tb_add_jump(last_tb, tb_exit, edge);
+ tb_add_jump(edge, 0, tb);
+ } else {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+
+ was_chained = true;
+ }
+ if (was_translated || was_chained) {
+ afl_request_tsl(pc, cs_base, flags, cf_mask, was_chained ? last_tb : NULL, tb_exit);
+ afl_request_tsl(pc, cs_base, flags, cf_mask, was_chained ? last_tb : NULL, tb_exit, edge_id);
}
return tb;
}
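Taken together with afl_gen_edge, these patches replace the classic prev_loc ^ cur_loc hashing with real edge coverage: the first time two blocks get chained, a fresh sequential ID is drawn from afl_edges_counter, a tiny stub block that bumps map[edge_id & (MAP_SIZE - 1)] is spliced between source and destination, and every later traversal of that edge runs the stub with no extra lookup. A self-contained model of the idea in plain C (not QEMU/TCG code):

#include <stdio.h>

#define MAP_SIZE 65536
static unsigned char afl_area[MAP_SIZE];
static unsigned long afl_edges_counter = sizeof(unsigned); /* bytes 0..3 reserved */

/* done once, when the chain last_tb -> tb is first created */
static unsigned long new_edge_id(void) { return ++afl_edges_counter; }

/* what the generated edge block does on every traversal */
static void edge_stub(unsigned long edge_id) {
  afl_area[edge_id & (MAP_SIZE - 1)]++;
}

int main(void) {
  unsigned long e = new_edge_id(); /* chain A -> B discovered: gets ID 5 */
  edge_stub(e);                    /* A -> B taken twice at run time */
  edge_stub(e);
  printf("edge %lu hit %d times\n", e, afl_area[e & (MAP_SIZE - 1)]);
  return 0;
}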

View File

@@ -1,21 +1,13 @@
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 639f0b27..21a45494 100644
index 639f0b27..8fa235ec 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -59,6 +59,8 @@
#include "exec/log.h"
#include "sysemu/cpus.h"
@@ -1667,6 +1667,8 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
return tb;
}
+#include "../patches/afl-qemu-translate-inl.h"
+
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
@@ -1721,6 +1723,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = ENV_GET_CPU(env);
+ afl_gen_trace(pc);
gen_intermediate_code(cpu, tb);
tcg_ctx->cpu = NULL;
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,

View File

@@ -618,6 +618,13 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
fsrv->map_size = tmp_map_size;
}
if ((status & FS_OPT_VARIADIC_MAP) == FS_OPT_VARIADIC_MAP) {
ACTF("Using variadic map size.");
fsrv->variadic_map_size = 1;
}
if ((status & FS_OPT_AUTODICT) == FS_OPT_AUTODICT) {
@@ -1045,6 +1052,11 @@ fsrv_run_result_t afl_fsrv_run_target(afl_forkserver_t *fsrv, u32 timeout,
behave very normally and do not have to be treated as volatile. */
MEM_BARRIER();
if (fsrv->variadic_map_size) {
fsrv->map_size = *(unsigned*)fsrv->trace_bits;
*(unsigned*)fsrv->trace_bits = 0;
}
/* Report outcome to caller. */
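On the afl-fuzz side, the hunk above picks the size up right after each execution: the first unsigned of trace_bits carries the map size used for that run and is cleared so it can never be mistaken for coverage. A hedged sketch of that consumer step, using a reduced struct with only the fields involved:

#include <stdio.h>
#include <string.h>

typedef struct {
  unsigned char *trace_bits;
  unsigned map_size;
  int variadic_map_size;
} fsrv_view_t; /* reduced stand-in for afl_forkserver_t */

static void pick_up_map_size(fsrv_view_t *fsrv) {
  if (!fsrv->variadic_map_size) return;
  unsigned reported;
  memcpy(&reported, fsrv->trace_bits, sizeof reported); /* unaligned-safe read */
  fsrv->map_size = reported;
  memset(fsrv->trace_bits, 0, sizeof reported);         /* never counts as coverage */
}

int main(void) {
  static unsigned char map[65536];
  *(unsigned *)map = 24;          /* what the target wrote after the run */
  fsrv_view_t fsrv = { map, sizeof map, 1 };
  pick_up_map_size(&fsrv);
  printf("map_size for this run: %u\n", fsrv.map_size); /* 24 */
  return 0;
}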